1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
23 __v.iov_len = min(n, __p->iov_len - skip); \
24 if (likely(__v.iov_len)) { \
25 __v.iov_base = __p->iov_base + skip; \
27 __v.iov_len -= left; \
28 skip += __v.iov_len; \
33 while (unlikely(!left && n)) { \
35 __v.iov_len = min(n, __p->iov_len); \
36 if (unlikely(!__v.iov_len)) \
38 __v.iov_base = __p->iov_base; \
40 __v.iov_len -= left; \
47 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
50 __v.iov_len = min(n, __p->iov_len - skip); \
51 if (likely(__v.iov_len)) { \
52 __v.iov_base = __p->iov_base + skip; \
54 skip += __v.iov_len; \
57 while (unlikely(n)) { \
59 __v.iov_len = min(n, __p->iov_len); \
60 if (unlikely(!__v.iov_len)) \
62 __v.iov_base = __p->iov_base; \
70 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
71 struct bvec_iter __start; \
72 __start.bi_size = n; \
73 __start.bi_bvec_done = skip; \
75 for_each_bvec(__v, i->bvec, __bi, __start) { \
80 #define iterate_xarray(i, n, __v, skip, STEP) { \
81 struct page *head = NULL; \
82 size_t wanted = n, seg, offset; \
83 loff_t start = i->xarray_start + skip; \
84 pgoff_t index = start >> PAGE_SHIFT; \
87 XA_STATE(xas, i->xarray, index); \
90 xas_for_each(&xas, head, ULONG_MAX) { \
91 if (xas_retry(&xas, head)) \
93 if (WARN_ON(xa_is_value(head))) \
95 if (WARN_ON(PageHuge(head))) \
97 for (j = (head->index < index) ? index - head->index : 0; \
98 j < thp_nr_pages(head); j++) { \
99 __v.bv_page = head + j; \
100 offset = (i->xarray_start + skip) & ~PAGE_MASK; \
101 seg = PAGE_SIZE - offset; \
102 __v.bv_offset = offset; \
103 __v.bv_len = min(n, seg); \
106 skip += __v.bv_len; \
117 #define iterate_and_advance(i, n, v, I, B, K, X) { \
118 if (unlikely(i->count < n)) \
121 size_t skip = i->iov_offset; \
122 if (likely(iter_is_iovec(i))) { \
123 const struct iovec *iov; \
125 iterate_iovec(i, n, v, iov, skip, (I)) \
126 if (skip == iov->iov_len) { \
130 i->nr_segs -= iov - i->iov; \
132 } else if (iov_iter_is_bvec(i)) { \
133 const struct bio_vec *bvec = i->bvec; \
135 struct bvec_iter __bi; \
136 iterate_bvec(i, n, v, __bi, skip, (B)) \
137 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
138 i->nr_segs -= i->bvec - bvec; \
139 skip = __bi.bi_bvec_done; \
140 } else if (iov_iter_is_kvec(i)) { \
141 const struct kvec *kvec; \
143 iterate_kvec(i, n, v, kvec, skip, (K)) \
144 if (skip == kvec->iov_len) { \
148 i->nr_segs -= kvec - i->kvec; \
150 } else if (iov_iter_is_xarray(i)) { \
152 iterate_xarray(i, n, v, skip, (X)) \
155 i->iov_offset = skip; \
159 static int copyout(void __user *to, const void *from, size_t n)
161 if (should_fail_usercopy())
163 if (access_ok(to, n)) {
164 instrument_copy_to_user(to, from, n);
165 n = raw_copy_to_user(to, from, n);
170 static int copyin(void *to, const void __user *from, size_t n)
172 if (should_fail_usercopy())
174 if (access_ok(from, n)) {
175 instrument_copy_from_user(to, from, n);
176 n = raw_copy_from_user(to, from, n);
181 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
184 size_t skip, copy, left, wanted;
185 const struct iovec *iov;
189 if (unlikely(bytes > i->count))
192 if (unlikely(!bytes))
198 skip = i->iov_offset;
199 buf = iov->iov_base + skip;
200 copy = min(bytes, iov->iov_len - skip);
202 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
203 kaddr = kmap_atomic(page);
204 from = kaddr + offset;
206 /* first chunk, usually the only one */
207 left = copyout(buf, from, copy);
213 while (unlikely(!left && bytes)) {
216 copy = min(bytes, iov->iov_len);
217 left = copyout(buf, from, copy);
223 if (likely(!bytes)) {
224 kunmap_atomic(kaddr);
227 offset = from - kaddr;
229 kunmap_atomic(kaddr);
230 copy = min(bytes, iov->iov_len - skip);
232 /* Too bad - revert to non-atomic kmap */
235 from = kaddr + offset;
236 left = copyout(buf, from, copy);
241 while (unlikely(!left && bytes)) {
244 copy = min(bytes, iov->iov_len);
245 left = copyout(buf, from, copy);
254 if (skip == iov->iov_len) {
258 i->count -= wanted - bytes;
259 i->nr_segs -= iov - i->iov;
261 i->iov_offset = skip;
262 return wanted - bytes;
265 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
268 size_t skip, copy, left, wanted;
269 const struct iovec *iov;
273 if (unlikely(bytes > i->count))
276 if (unlikely(!bytes))
282 skip = i->iov_offset;
283 buf = iov->iov_base + skip;
284 copy = min(bytes, iov->iov_len - skip);
286 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
287 kaddr = kmap_atomic(page);
290 /* first chunk, usually the only one */
291 left = copyin(to, buf, copy);
297 while (unlikely(!left && bytes)) {
300 copy = min(bytes, iov->iov_len);
301 left = copyin(to, buf, copy);
307 if (likely(!bytes)) {
308 kunmap_atomic(kaddr);
313 kunmap_atomic(kaddr);
314 copy = min(bytes, iov->iov_len - skip);
316 /* Too bad - revert to non-atomic kmap */
320 left = copyin(to, buf, copy);
325 while (unlikely(!left && bytes)) {
328 copy = min(bytes, iov->iov_len);
329 left = copyin(to, buf, copy);
338 if (skip == iov->iov_len) {
342 i->count -= wanted - bytes;
343 i->nr_segs -= iov - i->iov;
345 i->iov_offset = skip;
346 return wanted - bytes;
350 static bool sanity(const struct iov_iter *i)
352 struct pipe_inode_info *pipe = i->pipe;
353 unsigned int p_head = pipe->head;
354 unsigned int p_tail = pipe->tail;
355 unsigned int p_mask = pipe->ring_size - 1;
356 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
357 unsigned int i_head = i->head;
361 struct pipe_buffer *p;
362 if (unlikely(p_occupancy == 0))
363 goto Bad; // pipe must be non-empty
364 if (unlikely(i_head != p_head - 1))
365 goto Bad; // must be at the last buffer...
367 p = &pipe->bufs[i_head & p_mask];
368 if (unlikely(p->offset + p->len != i->iov_offset))
369 goto Bad; // ... at the end of segment
371 if (i_head != p_head)
372 goto Bad; // must be right after the last buffer
376 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
377 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
378 p_head, p_tail, pipe->ring_size);
379 for (idx = 0; idx < pipe->ring_size; idx++)
380 printk(KERN_ERR "[%p %p %d %d]\n",
382 pipe->bufs[idx].page,
383 pipe->bufs[idx].offset,
384 pipe->bufs[idx].len);
389 #define sanity(i) true
392 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
395 struct pipe_inode_info *pipe = i->pipe;
396 struct pipe_buffer *buf;
397 unsigned int p_tail = pipe->tail;
398 unsigned int p_mask = pipe->ring_size - 1;
399 unsigned int i_head = i->head;
402 if (unlikely(bytes > i->count))
405 if (unlikely(!bytes))
412 buf = &pipe->bufs[i_head & p_mask];
414 if (offset == off && buf->page == page) {
415 /* merge with the last one */
417 i->iov_offset += bytes;
421 buf = &pipe->bufs[i_head & p_mask];
423 if (pipe_full(i_head, p_tail, pipe->max_usage))
426 buf->ops = &page_cache_pipe_buf_ops;
429 buf->offset = offset;
432 pipe->head = i_head + 1;
433 i->iov_offset = offset + bytes;
441 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
442 * bytes. For each iovec, fault in each page that constitutes the iovec.
444 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
445 * because it is an invalid address).
447 int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
449 if (iter_is_iovec(i)) {
450 const struct iovec *p;
453 if (bytes > i->count)
455 for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
456 size_t len = min(bytes, p->iov_len - skip);
461 err = fault_in_pages_readable(p->iov_base + skip, len);
469 EXPORT_SYMBOL(iov_iter_fault_in_readable);
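/*
 * Usage sketch (illustrative, not part of this file): the pattern buffered
 * write paths use with iov_iter_fault_in_readable() - fault the user pages in
 * up front, then copy with page faults disabled and retry on a short copy.
 * my_copy_locked() is a hypothetical helper; only the iov_iter_* calls below
 * are real APIs.
 *
 *	static int my_fill_page(struct page *page, struct iov_iter *i)
 *	{
 *		while (iov_iter_count(i)) {
 *			size_t chunk = min_t(size_t, iov_iter_count(i), PAGE_SIZE);
 *			size_t copied;
 *
 *			if (unlikely(iov_iter_fault_in_readable(i, chunk)))
 *				return -EFAULT;	// user memory went away
 *			copied = my_copy_locked(page, i, chunk); // may be short
 *			iov_iter_advance(i, copied);
 *		}
 *		return 0;
 *	}
 */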
471 void iov_iter_init(struct iov_iter *i, unsigned int direction,
472 const struct iovec *iov, unsigned long nr_segs,
475 WARN_ON(direction & ~(READ | WRITE));
476 WARN_ON_ONCE(uaccess_kernel());
477 *i = (struct iov_iter) {
478 .iter_type = ITER_IOVEC,
479 .data_source = direction,
486 EXPORT_SYMBOL(iov_iter_init);
488 static inline bool allocated(struct pipe_buffer *buf)
490 return buf->ops == &default_pipe_buf_ops;
493 static inline void data_start(const struct iov_iter *i,
494 unsigned int *iter_headp, size_t *offp)
496 unsigned int p_mask = i->pipe->ring_size - 1;
497 unsigned int iter_head = i->head;
498 size_t off = i->iov_offset;
500 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
505 *iter_headp = iter_head;
509 static size_t push_pipe(struct iov_iter *i, size_t size,
510 int *iter_headp, size_t *offp)
512 struct pipe_inode_info *pipe = i->pipe;
513 unsigned int p_tail = pipe->tail;
514 unsigned int p_mask = pipe->ring_size - 1;
515 unsigned int iter_head;
519 if (unlikely(size > i->count))
525 data_start(i, &iter_head, &off);
526 *iter_headp = iter_head;
529 left -= PAGE_SIZE - off;
531 pipe->bufs[iter_head & p_mask].len += size;
534 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
537 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
538 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
539 struct page *page = alloc_page(GFP_USER);
543 buf->ops = &default_pipe_buf_ops;
546 buf->len = min_t(ssize_t, left, PAGE_SIZE);
549 pipe->head = iter_head;
557 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
560 struct pipe_inode_info *pipe = i->pipe;
561 unsigned int p_mask = pipe->ring_size - 1;
568 bytes = n = push_pipe(i, bytes, &i_head, &off);
572 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
573 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
575 i->iov_offset = off + chunk;
585 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
586 __wsum sum, size_t off)
588 __wsum next = csum_partial_copy_nocheck(from, to, len);
589 return csum_block_add(sum, next, off);
592 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
593 struct csum_state *csstate,
596 struct pipe_inode_info *pipe = i->pipe;
597 unsigned int p_mask = pipe->ring_size - 1;
598 __wsum sum = csstate->csum;
599 size_t off = csstate->off;
606 bytes = n = push_pipe(i, bytes, &i_head, &r);
610 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
611 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
612 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
615 i->iov_offset = r + chunk;
628 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
630 const char *from = addr;
631 if (unlikely(iov_iter_is_pipe(i)))
632 return copy_pipe_to_iter(addr, bytes, i);
633 if (iter_is_iovec(i))
635 iterate_and_advance(i, bytes, v,
636 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
637 memcpy_to_page(v.bv_page, v.bv_offset,
638 (from += v.bv_len) - v.bv_len, v.bv_len),
639 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
640 memcpy_to_page(v.bv_page, v.bv_offset,
641 (from += v.bv_len) - v.bv_len, v.bv_len)
646 EXPORT_SYMBOL(_copy_to_iter);
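/*
 * Usage sketch (illustrative): a minimal ->read_iter() handing a kernel
 * buffer to whichever flavour of iterator the caller supplied.  struct my_dev
 * and its buf/len fields are assumptions for the example; copy_to_iter() is
 * the real uio.h wrapper around _copy_to_iter().
 *
 *	static ssize_t my_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct my_dev *dev = iocb->ki_filp->private_data;
 *		size_t n = min(iov_iter_count(to), (size_t)dev->len);
 *
 *		return copy_to_iter(dev->buf, n, to);	// may be short on fault
 *	}
 */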
648 #ifdef CONFIG_ARCH_HAS_COPY_MC
649 static int copyout_mc(void __user *to, const void *from, size_t n)
651 if (access_ok(to, n)) {
652 instrument_copy_to_user(to, from, n);
653 n = copy_mc_to_user((__force void *) to, from, n);
658 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
659 const char *from, size_t len)
664 to = kmap_atomic(page);
665 ret = copy_mc_to_kernel(to + offset, from, len);
671 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
674 struct pipe_inode_info *pipe = i->pipe;
675 unsigned int p_mask = pipe->ring_size - 1;
677 size_t n, off, xfer = 0;
682 bytes = n = push_pipe(i, bytes, &i_head, &off);
686 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
689 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
692 i->iov_offset = off + chunk - rem;
706 * _copy_mc_to_iter - copy to iter with source memory error exception handling
707 * @addr: source kernel address
708 * @bytes: total transfer length
708 * @i: destination iterator
711 * The pmem driver deploys this for the dax operation
712 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
713 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
714 * successfully copied.
716 * The main differences between this and a typical _copy_to_iter() are:
718 * * Typical tail/residue handling after a fault retries the copy
719 * byte-by-byte until the fault happens again. Re-triggering machine
720 * checks is potentially fatal so the implementation uses source
721 * alignment and poison alignment assumptions to avoid re-triggering
722 * hardware exceptions.
724 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
725 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
728 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
730 const char *from = addr;
731 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
733 if (unlikely(iov_iter_is_pipe(i)))
734 return copy_mc_pipe_to_iter(addr, bytes, i);
735 if (iter_is_iovec(i))
737 iterate_and_advance(i, bytes, v,
738 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
741 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
742 (from += v.bv_len) - v.bv_len, v.bv_len);
744 curr_addr = (unsigned long) from;
745 bytes = curr_addr - s_addr - rem;
750 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
751 - v.iov_len, v.iov_len);
753 curr_addr = (unsigned long) from;
754 bytes = curr_addr - s_addr - rem;
759 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
760 (from += v.bv_len) - v.bv_len, v.bv_len);
762 curr_addr = (unsigned long) from;
763 bytes = curr_addr - s_addr - rem;
765 i->iov_offset += bytes;
774 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
775 #endif /* CONFIG_ARCH_HAS_COPY_MC */
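/*
 * Usage sketch (illustrative): roughly what a pmem-style dax copy_to_iter
 * callback does with the machine-check-aware variant.  my_dax_copy_to_iter()
 * and kaddr are assumptions made for the example.
 *
 *	static size_t my_dax_copy_to_iter(void *kaddr, size_t bytes,
 *					  struct iov_iter *i)
 *	{
 *	#ifdef CONFIG_ARCH_HAS_COPY_MC
 *		return _copy_mc_to_iter(kaddr, bytes, i); // may stop early on #MC
 *	#else
 *		return _copy_to_iter(kaddr, bytes, i);
 *	#endif
 *	}
 */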
777 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
780 if (unlikely(iov_iter_is_pipe(i))) {
784 if (iter_is_iovec(i))
786 iterate_and_advance(i, bytes, v,
787 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
788 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
789 v.bv_offset, v.bv_len),
790 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
791 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
792 v.bv_offset, v.bv_len)
797 EXPORT_SYMBOL(_copy_from_iter);
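/*
 * Usage sketch (illustrative): the mirror-image ->write_iter(), pulling data
 * out of the caller's iterator into a kernel buffer.  struct my_dev and its
 * fields are assumptions for the example.
 *
 *	static ssize_t my_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct my_dev *dev = iocb->ki_filp->private_data;
 *		size_t n = min(iov_iter_count(from), (size_t)dev->capacity);
 *
 *		n = copy_from_iter(dev->buf, n, from);	// advances 'from'
 *		return n ? n : -EFAULT;
 *	}
 */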
799 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
802 if (unlikely(iov_iter_is_pipe(i))) {
806 iterate_and_advance(i, bytes, v,
807 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
808 v.iov_base, v.iov_len),
809 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
810 v.bv_offset, v.bv_len),
811 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
812 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
813 v.bv_offset, v.bv_len)
818 EXPORT_SYMBOL(_copy_from_iter_nocache);
820 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
822 * _copy_from_iter_flushcache - write destination through cpu cache
823 * @addr: destination kernel address
824 * @bytes: total transfer length
825 * @i: source iterator
827 * The pmem driver arranges for filesystem-dax to use this facility via
828 * dax_copy_from_iter() for ensuring that writes to persistent memory
829 * are flushed through the CPU cache. It is differentiated from
830 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
831 * all iterator types. _copy_from_iter_nocache() only attempts to
832 * bypass the cache for the ITER_IOVEC case, and on some archs may use
833 * instructions that strand dirty-data in the cache.
835 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
838 if (unlikely(iov_iter_is_pipe(i))) {
842 iterate_and_advance(i, bytes, v,
843 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
844 v.iov_base, v.iov_len),
845 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
846 v.bv_offset, v.bv_len),
847 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
849 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
850 v.bv_offset, v.bv_len)
855 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
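/*
 * Usage sketch (illustrative): a dax ->copy_from_iter() that must make sure
 * the data reaches persistent media, roughly what the pmem driver wires up.
 * my_pmem_addr is an assumption.
 *
 *	static size_t my_dax_copy_from_iter(void *my_pmem_addr, size_t bytes,
 *					    struct iov_iter *i)
 *	{
 *		return _copy_from_iter_flushcache(my_pmem_addr, bytes, i);
 *	}
 */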
858 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
861 size_t v = n + offset;
864 * The general case needs to access the page order in order
865 * to compute the page size.
866 * However, we mostly deal with order-0 pages and thus can
867 * avoid a possible cache line miss for requests that fit all page orders.
870 if (n <= v && v <= PAGE_SIZE)
873 head = compound_head(page);
874 v += (page - head) << PAGE_SHIFT;
876 if (likely(n <= v && v <= (page_size(head))))
882 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
885 if (likely(iter_is_iovec(i)))
886 return copy_page_to_iter_iovec(page, offset, bytes, i);
887 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
888 void *kaddr = kmap_atomic(page);
889 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
890 kunmap_atomic(kaddr);
893 if (iov_iter_is_pipe(i))
894 return copy_page_to_iter_pipe(page, offset, bytes, i);
895 if (unlikely(iov_iter_is_discard(i))) {
896 if (unlikely(i->count < bytes))
905 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
909 if (unlikely(!page_copy_sane(page, offset, bytes)))
911 page += offset / PAGE_SIZE; // first subpage
914 size_t n = __copy_page_to_iter(page, offset,
915 min(bytes, (size_t)PAGE_SIZE - offset), i);
921 if (offset == PAGE_SIZE) {
928 EXPORT_SYMBOL(copy_page_to_iter);
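/*
 * Usage sketch (illustrative): a read path that holds a (possibly highmem)
 * page-cache page and lets copy_page_to_iter() pick the right strategy for
 * iovec/bvec/kvec/pipe/xarray destinations.  'mapping', 'index', 'pos', 'len'
 * and 'iter' are assumptions for the example.
 *
 *	struct page *page = find_get_page(mapping, index);
 *	size_t copied = 0;
 *
 *	if (page) {
 *		copied = copy_page_to_iter(page, offset_in_page(pos), len, iter);
 *		put_page(page);
 *	}
 */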
930 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
933 if (unlikely(!page_copy_sane(page, offset, bytes)))
935 if (likely(iter_is_iovec(i)))
936 return copy_page_from_iter_iovec(page, offset, bytes, i);
937 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
938 void *kaddr = kmap_atomic(page);
939 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
940 kunmap_atomic(kaddr);
946 EXPORT_SYMBOL(copy_page_from_iter);
948 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
950 struct pipe_inode_info *pipe = i->pipe;
951 unsigned int p_mask = pipe->ring_size - 1;
958 bytes = n = push_pipe(i, bytes, &i_head, &off);
963 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
964 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
966 i->iov_offset = off + chunk;
975 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
977 if (unlikely(iov_iter_is_pipe(i)))
978 return pipe_zero(bytes, i);
979 iterate_and_advance(i, bytes, v,
980 clear_user(v.iov_base, v.iov_len),
981 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
982 memset(v.iov_base, 0, v.iov_len),
983 memzero_page(v.bv_page, v.bv_offset, v.bv_len)
988 EXPORT_SYMBOL(iov_iter_zero);
990 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
993 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
994 if (unlikely(!page_copy_sane(page, offset, bytes))) {
995 kunmap_atomic(kaddr);
998 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
999 kunmap_atomic(kaddr);
1003 iterate_and_advance(i, bytes, v,
1004 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1005 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1006 v.bv_offset, v.bv_len),
1007 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1008 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1009 v.bv_offset, v.bv_len)
1011 kunmap_atomic(kaddr);
1014 EXPORT_SYMBOL(copy_page_from_iter_atomic);
1016 static inline void pipe_truncate(struct iov_iter *i)
1018 struct pipe_inode_info *pipe = i->pipe;
1019 unsigned int p_tail = pipe->tail;
1020 unsigned int p_head = pipe->head;
1021 unsigned int p_mask = pipe->ring_size - 1;
1023 if (!pipe_empty(p_head, p_tail)) {
1024 struct pipe_buffer *buf;
1025 unsigned int i_head = i->head;
1026 size_t off = i->iov_offset;
1029 buf = &pipe->bufs[i_head & p_mask];
1030 buf->len = off - buf->offset;
1033 while (p_head != i_head) {
1035 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1038 pipe->head = p_head;
1042 static void pipe_advance(struct iov_iter *i, size_t size)
1044 struct pipe_inode_info *pipe = i->pipe;
1046 struct pipe_buffer *buf;
1047 unsigned int p_mask = pipe->ring_size - 1;
1048 unsigned int i_head = i->head;
1049 size_t off = i->iov_offset, left = size;
1051 if (off) /* make it relative to the beginning of buffer */
1052 left += off - pipe->bufs[i_head & p_mask].offset;
1054 buf = &pipe->bufs[i_head & p_mask];
1055 if (left <= buf->len)
1061 i->iov_offset = buf->offset + left;
1064 /* ... and discard everything past that point */
1068 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
1070 struct bvec_iter bi;
1072 bi.bi_size = i->count;
1073 bi.bi_bvec_done = i->iov_offset;
1075 bvec_iter_advance(i->bvec, &bi, size);
1077 i->bvec += bi.bi_idx;
1078 i->nr_segs -= bi.bi_idx;
1079 i->count = bi.bi_size;
1080 i->iov_offset = bi.bi_bvec_done;
1083 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
1085 const struct iovec *iov, *end;
1091 size += i->iov_offset; // from beginning of current segment
1092 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
1093 if (likely(size < iov->iov_len))
1095 size -= iov->iov_len;
1097 i->iov_offset = size;
1098 i->nr_segs -= iov - i->iov;
1102 void iov_iter_advance(struct iov_iter *i, size_t size)
1104 if (unlikely(i->count < size))
1106 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
1107 /* iovec and kvec have identical layouts */
1108 iov_iter_iovec_advance(i, size);
1109 } else if (iov_iter_is_bvec(i)) {
1110 iov_iter_bvec_advance(i, size);
1111 } else if (iov_iter_is_pipe(i)) {
1112 pipe_advance(i, size);
1113 } else if (unlikely(iov_iter_is_xarray(i))) {
1114 i->iov_offset += size;
1116 } else if (iov_iter_is_discard(i)) {
1120 EXPORT_SYMBOL(iov_iter_advance);
1122 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1126 if (WARN_ON(unroll > MAX_RW_COUNT))
1129 if (unlikely(iov_iter_is_pipe(i))) {
1130 struct pipe_inode_info *pipe = i->pipe;
1131 unsigned int p_mask = pipe->ring_size - 1;
1132 unsigned int i_head = i->head;
1133 size_t off = i->iov_offset;
1135 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1136 size_t n = off - b->offset;
1142 if (!unroll && i_head == i->start_head) {
1147 b = &pipe->bufs[i_head & p_mask];
1148 off = b->offset + b->len;
1150 i->iov_offset = off;
1155 if (unlikely(iov_iter_is_discard(i)))
1157 if (unroll <= i->iov_offset) {
1158 i->iov_offset -= unroll;
1161 unroll -= i->iov_offset;
1162 if (iov_iter_is_xarray(i)) {
1163 BUG(); /* We should never go beyond the start of the specified
1164 * range since we might then be straying into pages that are not pinned.
1167 } else if (iov_iter_is_bvec(i)) {
1168 const struct bio_vec *bvec = i->bvec;
1170 size_t n = (--bvec)->bv_len;
1174 i->iov_offset = n - unroll;
1179 } else { /* same logics for iovec and kvec */
1180 const struct iovec *iov = i->iov;
1182 size_t n = (--iov)->iov_len;
1186 i->iov_offset = n - unroll;
1193 EXPORT_SYMBOL(iov_iter_revert);
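/*
 * Usage sketch (illustrative): the advance/revert pairing used when an
 * iterator is consumed speculatively and has to be handed back on error,
 * e.g. a datagram copy whose checksum turns out to be bad.  my_validate()
 * and 'buf'/'chunk' are assumptions.
 *
 *	size_t done = copy_from_iter(buf, chunk, iter);	// advances 'iter'
 *
 *	if (my_validate(buf, done) < 0)
 *		iov_iter_revert(iter, done);	// pretend nothing was consumed
 */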
1196 * Return the count of just the current iov_iter segment.
1198 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1200 if (i->nr_segs > 1) {
1201 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1202 return min(i->count, i->iov->iov_len - i->iov_offset);
1203 if (iov_iter_is_bvec(i))
1204 return min(i->count, i->bvec->bv_len - i->iov_offset);
1208 EXPORT_SYMBOL(iov_iter_single_seg_count);
1210 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1211 const struct kvec *kvec, unsigned long nr_segs,
1214 WARN_ON(direction & ~(READ | WRITE));
1215 *i = (struct iov_iter){
1216 .iter_type = ITER_KVEC,
1217 .data_source = direction,
1224 EXPORT_SYMBOL(iov_iter_kvec);
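/*
 * Usage sketch (illustrative): wrapping a plain kernel buffer in an ITER_KVEC
 * so it can be fed to code that only understands iov_iter, the way
 * kernel_read()-style helpers do.  'buf', 'len' and my_file are assumptions;
 * the kiocb setup uses the real init_sync_kiocb()/call_read_iter() helpers.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	struct kiocb kiocb;
 *
 *	init_sync_kiocb(&kiocb, my_file);
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	ret = call_read_iter(my_file, &kiocb, &iter);
 */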
1226 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1227 const struct bio_vec *bvec, unsigned long nr_segs,
1230 WARN_ON(direction & ~(READ | WRITE));
1231 *i = (struct iov_iter){
1232 .iter_type = ITER_BVEC,
1233 .data_source = direction,
1240 EXPORT_SYMBOL(iov_iter_bvec);
1242 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1243 struct pipe_inode_info *pipe,
1246 BUG_ON(direction != READ);
1247 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1248 *i = (struct iov_iter){
1249 .iter_type = ITER_PIPE,
1250 .data_source = false,
1253 .start_head = pipe->head,
1258 EXPORT_SYMBOL(iov_iter_pipe);
1261 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1262 * @i: The iterator to initialise.
1263 * @direction: The direction of the transfer.
1264 * @xarray: The xarray to access.
1265 * @start: The start file position.
1266 * @count: The size of the I/O buffer in bytes.
1268 * Set up an I/O iterator to either draw data out of the pages attached to an
1269 * inode or to inject data into those pages. The pages *must* be prevented
1270 * from evaporation, either by taking a ref on them or locking them by the caller.
1273 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1274 struct xarray *xarray, loff_t start, size_t count)
1276 BUG_ON(direction & ~1);
1277 *i = (struct iov_iter) {
1278 .iter_type = ITER_XARRAY,
1279 .data_source = direction,
1281 .xarray_start = start,
1286 EXPORT_SYMBOL(iov_iter_xarray);
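/*
 * Usage sketch (illustrative): an ITER_XARRAY iterator over an inode's page
 * cache, the way netfs-style helpers hand a run of already-present (and
 * ref'd/locked) pages to a generic copy routine.  'mapping', 'kbuf', 'pos'
 * and 'len' are assumptions.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	ret = copy_to_iter(kbuf, len, &iter);	// lands in the cached pages
 */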
1289 * iov_iter_discard - Initialise an I/O iterator that discards data
1290 * @i: The iterator to initialise.
1291 * @direction: The direction of the transfer.
1292 * @count: The size of the I/O buffer in bytes.
1294 * Set up an I/O iterator that just discards everything that's written to it.
1295 * It's only available as a READ iterator.
1297 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1299 BUG_ON(direction != READ);
1300 *i = (struct iov_iter){
1301 .iter_type = ITER_DISCARD,
1302 .data_source = false,
1307 EXPORT_SYMBOL(iov_iter_discard);
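/*
 * Usage sketch (illustrative): skipping over data by "reading" it into a
 * discard iterator - everything copied in simply vanishes while i->count
 * still shrinks, so the caller can account for the skipped bytes.  my_file
 * and the kiocb setup are assumptions.
 *
 *	struct iov_iter junk;
 *
 *	iov_iter_discard(&junk, READ, bytes_to_skip);
 *	ret = call_read_iter(my_file, &kiocb, &junk);	// data is thrown away
 */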
1309 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1311 unsigned long res = 0;
1312 size_t size = i->count;
1313 size_t skip = i->iov_offset;
1316 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1317 size_t len = i->iov[k].iov_len - skip;
1319 res |= (unsigned long)i->iov[k].iov_base + skip;
1331 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1334 size_t size = i->count;
1335 unsigned skip = i->iov_offset;
1338 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1339 size_t len = i->bvec[k].bv_len - skip;
1340 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1351 unsigned long iov_iter_alignment(const struct iov_iter *i)
1353 /* iovec and kvec have identical layouts */
1354 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1355 return iov_iter_alignment_iovec(i);
1357 if (iov_iter_is_bvec(i))
1358 return iov_iter_alignment_bvec(i);
1360 if (iov_iter_is_pipe(i)) {
1361 unsigned int p_mask = i->pipe->ring_size - 1;
1362 size_t size = i->count;
1364 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1365 return size | i->iov_offset;
1369 if (iov_iter_is_xarray(i))
1370 return (i->xarray_start + i->iov_offset) | i->count;
1374 EXPORT_SYMBOL(iov_iter_alignment);
1376 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1378 unsigned long res = 0;
1379 unsigned long v = 0;
1380 size_t size = i->count;
1383 if (WARN_ON(!iter_is_iovec(i)))
1386 for (k = 0; k < i->nr_segs; k++) {
1387 if (i->iov[k].iov_len) {
1388 unsigned long base = (unsigned long)i->iov[k].iov_base;
1389 if (v) // if not the first one
1390 res |= base | v; // this start | previous end
1391 v = base + i->iov[k].iov_len;
1392 if (size <= i->iov[k].iov_len)
1394 size -= i->iov[k].iov_len;
1399 EXPORT_SYMBOL(iov_iter_gap_alignment);
1401 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1403 struct page **pages,
1407 struct pipe_inode_info *pipe = i->pipe;
1408 unsigned int p_mask = pipe->ring_size - 1;
1409 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1416 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1424 static ssize_t pipe_get_pages(struct iov_iter *i,
1425 struct page **pages, size_t maxsize, unsigned maxpages,
1428 unsigned int iter_head, npages;
1434 data_start(i, &iter_head, start);
1435 /* Amount of free space: some of this one + all after this one */
1436 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1437 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1439 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1442 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1443 pgoff_t index, unsigned int nr_pages)
1445 XA_STATE(xas, xa, index);
1447 unsigned int ret = 0;
1450 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1451 if (xas_retry(&xas, page))
1454 /* Has the page moved or been split? */
1455 if (unlikely(page != xas_reload(&xas))) {
1460 pages[ret] = find_subpage(page, xas.xa_index);
1461 get_page(pages[ret]);
1462 if (++ret == nr_pages)
1469 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1470 struct page **pages, size_t maxsize,
1471 unsigned maxpages, size_t *_start_offset)
1473 unsigned nr, offset;
1474 pgoff_t index, count;
1475 size_t size = maxsize, actual;
1478 if (!size || !maxpages)
1481 pos = i->xarray_start + i->iov_offset;
1482 index = pos >> PAGE_SHIFT;
1483 offset = pos & ~PAGE_MASK;
1484 *_start_offset = offset;
1487 if (size > PAGE_SIZE - offset) {
1488 size -= PAGE_SIZE - offset;
1489 count += size >> PAGE_SHIFT;
1495 if (count > maxpages)
1498 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1502 actual = PAGE_SIZE * nr;
1504 if (nr == count && size > 0) {
1505 unsigned last_offset = (nr > 1) ? 0 : offset;
1506 actual -= PAGE_SIZE - (last_offset + size);
1511 /* must only be called on a non-empty ITER_IOVEC iterator */
1512 static unsigned long first_iovec_segment(const struct iov_iter *i,
1513 size_t *size, size_t *start,
1514 size_t maxsize, unsigned maxpages)
1519 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1520 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1521 size_t len = i->iov[k].iov_len - skip;
1527 len += (*start = addr % PAGE_SIZE);
1528 if (len > maxpages * PAGE_SIZE)
1529 len = maxpages * PAGE_SIZE;
1531 return addr & PAGE_MASK;
1533 BUG(); // if it had been empty, we wouldn't get called
1536 /* must only be called on a non-empty ITER_BVEC iterator */
1537 static struct page *first_bvec_segment(const struct iov_iter *i,
1538 size_t *size, size_t *start,
1539 size_t maxsize, unsigned maxpages)
1542 size_t skip = i->iov_offset, len;
1544 len = i->bvec->bv_len - skip;
1547 skip += i->bvec->bv_offset;
1548 page = i->bvec->bv_page + skip / PAGE_SIZE;
1549 len += (*start = skip % PAGE_SIZE);
1550 if (len > maxpages * PAGE_SIZE)
1551 len = maxpages * PAGE_SIZE;
1556 ssize_t iov_iter_get_pages(struct iov_iter *i,
1557 struct page **pages, size_t maxsize, unsigned maxpages,
1563 if (maxsize > i->count)
1568 if (likely(iter_is_iovec(i))) {
1571 addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
1572 n = DIV_ROUND_UP(len, PAGE_SIZE);
1573 res = get_user_pages_fast(addr, n,
1574 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1576 if (unlikely(res < 0))
1578 return (res == n ? len : res * PAGE_SIZE) - *start;
1580 if (iov_iter_is_bvec(i)) {
1583 page = first_bvec_segment(i, &len, start, maxsize, maxpages);
1584 n = DIV_ROUND_UP(len, PAGE_SIZE);
1586 get_page(*pages++ = page++);
1587 return len - *start;
1589 if (iov_iter_is_pipe(i))
1590 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1591 if (iov_iter_is_xarray(i))
1592 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1595 EXPORT_SYMBOL(iov_iter_get_pages);
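/*
 * Usage sketch (illustrative): pinning the caller's pages for zero-copy or
 * direct I/O.  Only what fits in the array (and only a contiguous run from
 * the first segment) is returned, so real users loop, advancing the iterator
 * by the bytes obtained each round.  MY_NR is an assumption.
 *
 *	struct page *pages[MY_NR];
 *	size_t offset;
 *	unsigned int k, npages;
 *	ssize_t bytes;
 *
 *	bytes = iov_iter_get_pages(iter, pages, iov_iter_count(iter),
 *				   MY_NR, &offset);
 *	if (bytes > 0) {
 *		npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
 *		// ... map pages[0..npages-1] into a bio/sgl starting at 'offset' ...
 *		iov_iter_advance(iter, bytes);
 *		for (k = 0; k < npages; k++)	// drop the get_page() references
 *			put_page(pages[k]);
 *	}
 */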
1597 static struct page **get_pages_array(size_t n)
1599 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1602 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1603 struct page ***pages, size_t maxsize,
1607 unsigned int iter_head, npages;
1613 data_start(i, &iter_head, start);
1614 /* Amount of free space: some of this one + all after this one */
1615 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1616 n = npages * PAGE_SIZE - *start;
1620 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1621 p = get_pages_array(npages);
1624 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1632 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1633 struct page ***pages, size_t maxsize,
1634 size_t *_start_offset)
1637 unsigned nr, offset;
1638 pgoff_t index, count;
1639 size_t size = maxsize, actual;
1645 pos = i->xarray_start + i->iov_offset;
1646 index = pos >> PAGE_SHIFT;
1647 offset = pos & ~PAGE_MASK;
1648 *_start_offset = offset;
1651 if (size > PAGE_SIZE - offset) {
1652 size -= PAGE_SIZE - offset;
1653 count += size >> PAGE_SHIFT;
1659 p = get_pages_array(count);
1664 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1668 actual = PAGE_SIZE * nr;
1670 if (nr == count && size > 0) {
1671 unsigned last_offset = (nr > 1) ? 0 : offset;
1672 actual -= PAGE_SIZE - (last_offset + size);
1677 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1678 struct page ***pages, size_t maxsize,
1685 if (maxsize > i->count)
1690 if (likely(iter_is_iovec(i))) {
1693 addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
1694 n = DIV_ROUND_UP(len, PAGE_SIZE);
1695 p = get_pages_array(n);
1698 res = get_user_pages_fast(addr, n,
1699 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1700 if (unlikely(res < 0)) {
1705 return (res == n ? len : res * PAGE_SIZE) - *start;
1707 if (iov_iter_is_bvec(i)) {
1710 page = first_bvec_segment(i, &len, start, maxsize, ~0U);
1711 n = DIV_ROUND_UP(len, PAGE_SIZE);
1712 *pages = p = get_pages_array(n);
1716 get_page(*p++ = page++);
1717 return len - *start;
1719 if (iov_iter_is_pipe(i))
1720 return pipe_get_pages_alloc(i, pages, maxsize, start);
1721 if (iov_iter_is_xarray(i))
1722 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1725 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1727 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1734 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1738 iterate_and_advance(i, bytes, v, ({
1739 next = csum_and_copy_from_user(v.iov_base,
1740 (to += v.iov_len) - v.iov_len,
1743 sum = csum_block_add(sum, next, off);
1746 next ? 0 : v.iov_len;
1748 char *p = kmap_atomic(v.bv_page);
1749 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1750 p + v.bv_offset, v.bv_len,
1755 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1756 v.iov_base, v.iov_len,
1760 char *p = kmap_atomic(v.bv_page);
1761 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1762 p + v.bv_offset, v.bv_len,
1771 EXPORT_SYMBOL(csum_and_copy_from_iter);
1773 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1776 struct csum_state *csstate = _csstate;
1777 const char *from = addr;
1781 if (unlikely(iov_iter_is_pipe(i)))
1782 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1784 sum = csum_shift(csstate->csum, csstate->off);
1786 if (unlikely(iov_iter_is_discard(i))) {
1787 WARN_ON(1); /* for now */
1790 iterate_and_advance(i, bytes, v, ({
1791 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1795 sum = csum_block_add(sum, next, off);
1798 next ? 0 : v.iov_len;
1800 char *p = kmap_atomic(v.bv_page);
1801 sum = csum_and_memcpy(p + v.bv_offset,
1802 (from += v.bv_len) - v.bv_len,
1803 v.bv_len, sum, off);
1807 sum = csum_and_memcpy(v.iov_base,
1808 (from += v.iov_len) - v.iov_len,
1809 v.iov_len, sum, off);
1812 char *p = kmap_atomic(v.bv_page);
1813 sum = csum_and_memcpy(p + v.bv_offset,
1814 (from += v.bv_len) - v.bv_len,
1815 v.bv_len, sum, off);
1820 csstate->csum = csum_shift(sum, csstate->off);
1821 csstate->off += bytes;
1824 EXPORT_SYMBOL(csum_and_copy_to_iter);
1826 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1829 #ifdef CONFIG_CRYPTO_HASH
1830 struct ahash_request *hash = hashp;
1831 struct scatterlist sg;
1834 copied = copy_to_iter(addr, bytes, i);
1835 sg_init_one(&sg, addr, copied);
1836 ahash_request_set_crypt(hash, &sg, NULL, copied);
1837 crypto_ahash_update(hash);
1843 EXPORT_SYMBOL(hash_and_copy_to_iter);
1845 static int iov_npages(const struct iov_iter *i, int maxpages)
1847 size_t skip = i->iov_offset, size = i->count;
1848 const struct iovec *p;
1851 for (p = i->iov; size; skip = 0, p++) {
1852 unsigned offs = offset_in_page(p->iov_base + skip);
1853 size_t len = min(p->iov_len - skip, size);
1857 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1858 if (unlikely(npages > maxpages))
1865 static int bvec_npages(const struct iov_iter *i, int maxpages)
1867 size_t skip = i->iov_offset, size = i->count;
1868 const struct bio_vec *p;
1871 for (p = i->bvec; size; skip = 0, p++) {
1872 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1873 size_t len = min(p->bv_len - skip, size);
1876 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1877 if (unlikely(npages > maxpages))
1883 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1885 if (unlikely(!i->count))
1887 /* iovec and kvec have identical layouts */
1888 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1889 return iov_npages(i, maxpages);
1890 if (iov_iter_is_bvec(i))
1891 return bvec_npages(i, maxpages);
1892 if (iov_iter_is_pipe(i)) {
1893 unsigned int iter_head;
1900 data_start(i, &iter_head, &off);
1901 /* some of this one + all after this one */
1902 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1903 return min(npages, maxpages);
1905 if (iov_iter_is_xarray(i)) {
1906 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1907 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1908 return min(npages, maxpages);
1912 EXPORT_SYMBOL(iov_iter_npages);
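/*
 * Usage sketch (illustrative): sizing an allocation before mapping an
 * iterator, e.g. deciding how many bio_vecs a direct-I/O request may need.
 * my_alloc_request() is an assumption; BIO_MAX_VECS is the usual cap.
 *
 *	int nr = iov_iter_npages(iter, BIO_MAX_VECS);
 *
 *	req = my_alloc_request(nr);	// at most 'nr' page segments needed
 */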
1914 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1917 if (unlikely(iov_iter_is_pipe(new))) {
1921 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1923 if (iov_iter_is_bvec(new))
1924 return new->bvec = kmemdup(new->bvec,
1925 new->nr_segs * sizeof(struct bio_vec),
1928 /* iovec and kvec have identical layout */
1929 return new->iov = kmemdup(new->iov,
1930 new->nr_segs * sizeof(struct iovec),
1933 EXPORT_SYMBOL(dup_iter);
1935 static int copy_compat_iovec_from_user(struct iovec *iov,
1936 const struct iovec __user *uvec, unsigned long nr_segs)
1938 const struct compat_iovec __user *uiov =
1939 (const struct compat_iovec __user *)uvec;
1940 int ret = -EFAULT, i;
1942 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1945 for (i = 0; i < nr_segs; i++) {
1949 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1950 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1952 /* check for compat_size_t not fitting in compat_ssize_t .. */
1957 iov[i].iov_base = compat_ptr(buf);
1958 iov[i].iov_len = len;
1967 static int copy_iovec_from_user(struct iovec *iov,
1968 const struct iovec __user *uvec, unsigned long nr_segs)
1972 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1974 for (seg = 0; seg < nr_segs; seg++) {
1975 if ((ssize_t)iov[seg].iov_len < 0)
1982 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1983 unsigned long nr_segs, unsigned long fast_segs,
1984 struct iovec *fast_iov, bool compat)
1986 struct iovec *iov = fast_iov;
1990 * SuS says "The readv() function *may* fail if the iovcnt argument was
1991 * less than or equal to 0, or greater than {IOV_MAX}. Linux has
1992 * traditionally returned zero for zero segments, so...
1996 if (nr_segs > UIO_MAXIOV)
1997 return ERR_PTR(-EINVAL);
1998 if (nr_segs > fast_segs) {
1999 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
2001 return ERR_PTR(-ENOMEM);
2005 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
2007 ret = copy_iovec_from_user(iov, uvec, nr_segs);
2009 if (iov != fast_iov)
2011 return ERR_PTR(ret);
2017 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
2018 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
2019 struct iov_iter *i, bool compat)
2021 ssize_t total_len = 0;
2025 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
2028 return PTR_ERR(iov);
2032 * According to the Single Unix Specification we should return EINVAL if
2033 * an element length is < 0 when cast to ssize_t or if the total length
2034 * would overflow the ssize_t return value of the system call.
2036 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the overflow case.
2039 for (seg = 0; seg < nr_segs; seg++) {
2040 ssize_t len = (ssize_t)iov[seg].iov_len;
2042 if (!access_ok(iov[seg].iov_base, len)) {
2049 if (len > MAX_RW_COUNT - total_len) {
2050 len = MAX_RW_COUNT - total_len;
2051 iov[seg].iov_len = len;
2056 iov_iter_init(i, type, iov, nr_segs, total_len);
2065 * import_iovec() - Copy an array of &struct iovec from userspace
2066 * into the kernel, check that it is valid, and initialize a new
2067 * &struct iov_iter iterator to access it.
2069 * @type: One of %READ or %WRITE.
2070 * @uvec: Pointer to the userspace array.
2071 * @nr_segs: Number of elements in userspace array.
2072 * @fast_segs: Number of elements in *@iovp.
2073 * @iovp: (input and output parameter) Pointer to pointer to (usually small
2074 * on-stack) kernel array.
2075 * @i: Pointer to iterator that will be initialized on success.
2077 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
2078 * then this function places %NULL in *@iovp on return. Otherwise, a new
2079 * array will be allocated and the result placed in *@iovp. This means that
2080 * the caller may call kfree() on *@iovp regardless of whether the small
2081 * on-stack array was used or not (and regardless of whether this function
2082 * returns an error or not).
2084 * Return: Negative error code on error, bytes imported on success
2086 ssize_t import_iovec(int type, const struct iovec __user *uvec,
2087 unsigned nr_segs, unsigned fast_segs,
2088 struct iovec **iovp, struct iov_iter *i)
2090 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2091 in_compat_syscall());
2093 EXPORT_SYMBOL(import_iovec);
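/*
 * Usage sketch (illustrative): the standard readv()-style calling sequence.
 * The on-stack iovstack[UIO_FASTIOV] array is the usual "fast" buffer, and
 * whatever import_iovec() leaves in 'iov' is always safe to kfree() (it is
 * NULL when the stack array was used).  do_my_readv()/my_do_read() are
 * assumptions.
 *
 *	static ssize_t do_my_readv(struct file *f, const struct iovec __user *vec,
 *				   unsigned long vlen)
 *	{
 *		struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *		struct iov_iter iter;
 *		ssize_t ret;
 *
 *		ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack),
 *				   &iov, &iter);
 *		if (ret < 0)
 *			return ret;
 *		ret = my_do_read(f, &iter);	// consume the iterator
 *		kfree(iov);
 *		return ret;
 *	}
 */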
2095 int import_single_range(int rw, void __user *buf, size_t len,
2096 struct iovec *iov, struct iov_iter *i)
2098 if (len > MAX_RW_COUNT)
2100 if (unlikely(!access_ok(buf, len)))
2103 iov->iov_base = buf;
2105 iov_iter_init(i, rw, iov, 1, len);
2108 EXPORT_SYMBOL(import_single_range);
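/*
 * Usage sketch (illustrative): the single-buffer analogue used by plain
 * read(2)/write(2) paths - one struct iovec on the stack, no allocation and
 * therefore no kfree().  my_do_read() and 'ubuf'/'count' are assumptions.
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(READ, ubuf, count, &iov, &iter);
 *
 *	if (!ret)
 *		ret = my_do_read(file, &iter);
 */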