1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 /* covers iovec and kvec alike */
20 #define iterate_iovec(i, n, __v, __off, __p, skip, STEP) { \
23 __v.iov_len = min(n, __p->iov_len - skip); \
24 if (likely(__v.iov_len)) { \
25 __v.iov_base = __p->iov_base + skip; \
26 __v.iov_len -= (STEP); \
27 __off += __v.iov_len; \
28 skip += __v.iov_len; \
30 if (skip < __p->iov_len) \
39 #define iterate_bvec(i, n, __v, __off, p, skip, STEP) { \
42 unsigned offset = p->bv_offset + skip; \
44 void *kaddr = kmap_local_page(p->bv_page + \
45 offset / PAGE_SIZE); \
46 __v.iov_base = kaddr + offset % PAGE_SIZE; \
47 __v.iov_len = min(min(n, p->bv_len - skip), \
48 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
50 kunmap_local(kaddr); \
51 __v.iov_len -= left; \
52 __off += __v.iov_len; \
53 skip += __v.iov_len; \
54 if (skip == p->bv_len) { \
65 #define iterate_xarray(i, n, __v, __off, skip, STEP) { \
68 struct page *head = NULL; \
70 loff_t start = i->xarray_start + skip; \
71 pgoff_t index = start >> PAGE_SHIFT; \
74 XA_STATE(xas, i->xarray, index); \
77 xas_for_each(&xas, head, ULONG_MAX) { \
79 if (xas_retry(&xas, head)) \
81 if (WARN_ON(xa_is_value(head))) \
83 if (WARN_ON(PageHuge(head))) \
85 for (j = (head->index < index) ? index - head->index : 0; \
86 j < thp_nr_pages(head); j++) { \
87 void *kaddr = kmap_local_page(head + j); \
88 offset = (start + __off) % PAGE_SIZE; \
89 __v.iov_base = kaddr + offset; \
90 seg = PAGE_SIZE - offset; \
91 __v.iov_len = min(n, seg); \
93 kunmap_local(kaddr); \
94 __v.iov_len -= left; \
95 __off += __v.iov_len; \
107 #define __iterate_and_advance(i, n, v, off, I, K) { \
108 if (unlikely(i->count < n)) \
111 size_t skip = i->iov_offset; \
112 if (likely(iter_is_iovec(i))) { \
113 const struct iovec *iov = i->iov; \
115 iterate_iovec(i, n, v, off, iov, skip, (I)) \
116 i->nr_segs -= iov - i->iov; \
118 } else if (iov_iter_is_bvec(i)) { \
119 const struct bio_vec *bvec = i->bvec; \
121 iterate_bvec(i, n, v, off, bvec, skip, (K)) \
122 i->nr_segs -= bvec - i->bvec; \
124 } else if (iov_iter_is_kvec(i)) { \
125 const struct kvec *kvec = i->kvec; \
127 iterate_iovec(i, n, v, off, kvec, skip, (K)) \
128 i->nr_segs -= kvec - i->kvec; \
130 } else if (iov_iter_is_xarray(i)) { \
132 iterate_xarray(i, n, v, off, skip, (K)) \
135 i->iov_offset = skip; \
138 #define iterate_and_advance(i, n, v, off, I, K) \
139 __iterate_and_advance(i, n, v, off, I, ((void)(K),0))
141 static int copyout(void __user *to, const void *from, size_t n)
143 if (should_fail_usercopy())
145 if (access_ok(to, n)) {
146 instrument_copy_to_user(to, from, n);
147 n = raw_copy_to_user(to, from, n);
152 static int copyin(void *to, const void __user *from, size_t n)
154 if (should_fail_usercopy())
156 if (access_ok(from, n)) {
157 instrument_copy_from_user(to, from, n);
158 n = raw_copy_from_user(to, from, n);
163 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
166 size_t skip, copy, left, wanted;
167 const struct iovec *iov;
171 if (unlikely(bytes > i->count))
174 if (unlikely(!bytes))
180 skip = i->iov_offset;
181 buf = iov->iov_base + skip;
182 copy = min(bytes, iov->iov_len - skip);
184 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
185 kaddr = kmap_atomic(page);
186 from = kaddr + offset;
188 /* first chunk, usually the only one */
189 left = copyout(buf, from, copy);
195 while (unlikely(!left && bytes)) {
198 copy = min(bytes, iov->iov_len);
199 left = copyout(buf, from, copy);
205 if (likely(!bytes)) {
206 kunmap_atomic(kaddr);
209 offset = from - kaddr;
211 kunmap_atomic(kaddr);
212 copy = min(bytes, iov->iov_len - skip);
214 /* Too bad - revert to non-atomic kmap */
217 from = kaddr + offset;
218 left = copyout(buf, from, copy);
223 while (unlikely(!left && bytes)) {
226 copy = min(bytes, iov->iov_len);
227 left = copyout(buf, from, copy);
236 if (skip == iov->iov_len) {
240 i->count -= wanted - bytes;
241 i->nr_segs -= iov - i->iov;
243 i->iov_offset = skip;
244 return wanted - bytes;
247 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
250 size_t skip, copy, left, wanted;
251 const struct iovec *iov;
255 if (unlikely(bytes > i->count))
258 if (unlikely(!bytes))
264 skip = i->iov_offset;
265 buf = iov->iov_base + skip;
266 copy = min(bytes, iov->iov_len - skip);
268 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
269 kaddr = kmap_atomic(page);
272 /* first chunk, usually the only one */
273 left = copyin(to, buf, copy);
279 while (unlikely(!left && bytes)) {
282 copy = min(bytes, iov->iov_len);
283 left = copyin(to, buf, copy);
289 if (likely(!bytes)) {
290 kunmap_atomic(kaddr);
295 kunmap_atomic(kaddr);
296 copy = min(bytes, iov->iov_len - skip);
298 /* Too bad - revert to non-atomic kmap */
302 left = copyin(to, buf, copy);
307 while (unlikely(!left && bytes)) {
310 copy = min(bytes, iov->iov_len);
311 left = copyin(to, buf, copy);
320 if (skip == iov->iov_len) {
324 i->count -= wanted - bytes;
325 i->nr_segs -= iov - i->iov;
327 i->iov_offset = skip;
328 return wanted - bytes;
332 static bool sanity(const struct iov_iter *i)
334 struct pipe_inode_info *pipe = i->pipe;
335 unsigned int p_head = pipe->head;
336 unsigned int p_tail = pipe->tail;
337 unsigned int p_mask = pipe->ring_size - 1;
338 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
339 unsigned int i_head = i->head;
343 struct pipe_buffer *p;
344 if (unlikely(p_occupancy == 0))
345 goto Bad; // pipe must be non-empty
346 if (unlikely(i_head != p_head - 1))
347 goto Bad; // must be at the last buffer...
349 p = &pipe->bufs[i_head & p_mask];
350 if (unlikely(p->offset + p->len != i->iov_offset))
351 goto Bad; // ... at the end of segment
353 if (i_head != p_head)
354 goto Bad; // must be right after the last buffer
358 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
359 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
360 p_head, p_tail, pipe->ring_size);
361 for (idx = 0; idx < pipe->ring_size; idx++)
362 printk(KERN_ERR "[%p %p %d %d]\n",
364 pipe->bufs[idx].page,
365 pipe->bufs[idx].offset,
366 pipe->bufs[idx].len);
371 #define sanity(i) true
374 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
377 struct pipe_inode_info *pipe = i->pipe;
378 struct pipe_buffer *buf;
379 unsigned int p_tail = pipe->tail;
380 unsigned int p_mask = pipe->ring_size - 1;
381 unsigned int i_head = i->head;
384 if (unlikely(bytes > i->count))
387 if (unlikely(!bytes))
394 buf = &pipe->bufs[i_head & p_mask];
396 if (offset == off && buf->page == page) {
397 /* merge with the last one */
399 i->iov_offset += bytes;
403 buf = &pipe->bufs[i_head & p_mask];
405 if (pipe_full(i_head, p_tail, pipe->max_usage))
408 buf->ops = &page_cache_pipe_buf_ops;
411 buf->offset = offset;
414 pipe->head = i_head + 1;
415 i->iov_offset = offset + bytes;
423 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
424 * @bytes. For each iovec, fault in each page that constitutes the iovec.
426 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
427 * because it is an invalid address).
429 int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
431 if (iter_is_iovec(i)) {
432 const struct iovec *p;
435 if (bytes > i->count)
437 for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
438 size_t len = min(bytes, p->iov_len - skip);
443 err = fault_in_pages_readable(p->iov_base + skip, len);
451 EXPORT_SYMBOL(iov_iter_fault_in_readable);
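
/*
 * Editor's sketch, not part of the original file: the usual caller pattern,
 * loosely modelled on the filemap write path.  User pages are faulted in
 * *before* any page lock is taken, so the atomic copy below cannot deadlock
 * waiting on a page fault; a short copy simply makes the caller retry.  The
 * helper name and the elided locking are hypothetical.
 */
static ssize_t example_fill_page(struct page *page, size_t offset,
				 size_t chunk, struct iov_iter *from)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(from, chunk)))
		return -EFAULT;		/* source memory is not accessible */

	/* ... lock the page / ->write_begin() would go here ... */
	copied = copy_page_from_iter_atomic(page, offset, chunk, from);
	/* ... ->write_end(); the caller loops while copied < chunk ... */

	return copied;
}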
453 void iov_iter_init(struct iov_iter *i, unsigned int direction,
454 const struct iovec *iov, unsigned long nr_segs,
457 WARN_ON(direction & ~(READ | WRITE));
458 WARN_ON_ONCE(uaccess_kernel());
459 *i = (struct iov_iter) {
460 .iter_type = ITER_IOVEC,
461 .data_source = direction,
468 EXPORT_SYMBOL(iov_iter_init);
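
/*
 * Editor's sketch, not part of the original file: building an ITER_IOVEC for
 * a read(2)-style operation.  READ marks the iovec array as the destination,
 * so data is moved into it with copy_to_iter().  'buf', 'uvec' and the
 * function name are hypothetical.
 */
static ssize_t example_reply_to_user(const void *buf, size_t len,
				     const struct iovec *uvec,
				     unsigned long nr_segs)
{
	struct iov_iter iter;

	/* len must not exceed the total length described by uvec */
	iov_iter_init(&iter, READ, uvec, nr_segs, len);
	return copy_to_iter(buf, len, &iter);	/* bytes actually copied */
}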
470 static inline bool allocated(struct pipe_buffer *buf)
472 return buf->ops == &default_pipe_buf_ops;
475 static inline void data_start(const struct iov_iter *i,
476 unsigned int *iter_headp, size_t *offp)
478 unsigned int p_mask = i->pipe->ring_size - 1;
479 unsigned int iter_head = i->head;
480 size_t off = i->iov_offset;
482 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
487 *iter_headp = iter_head;
491 static size_t push_pipe(struct iov_iter *i, size_t size,
492 int *iter_headp, size_t *offp)
494 struct pipe_inode_info *pipe = i->pipe;
495 unsigned int p_tail = pipe->tail;
496 unsigned int p_mask = pipe->ring_size - 1;
497 unsigned int iter_head;
501 if (unlikely(size > i->count))
507 data_start(i, &iter_head, &off);
508 *iter_headp = iter_head;
511 left -= PAGE_SIZE - off;
513 pipe->bufs[iter_head & p_mask].len += size;
516 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
519 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
520 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
521 struct page *page = alloc_page(GFP_USER);
525 buf->ops = &default_pipe_buf_ops;
528 buf->len = min_t(ssize_t, left, PAGE_SIZE);
531 pipe->head = iter_head;
539 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
542 struct pipe_inode_info *pipe = i->pipe;
543 unsigned int p_mask = pipe->ring_size - 1;
550 bytes = n = push_pipe(i, bytes, &i_head, &off);
554 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
555 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
557 i->iov_offset = off + chunk;
567 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
568 __wsum sum, size_t off)
570 __wsum next = csum_partial_copy_nocheck(from, to, len);
571 return csum_block_add(sum, next, off);
574 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
575 struct csum_state *csstate,
578 struct pipe_inode_info *pipe = i->pipe;
579 unsigned int p_mask = pipe->ring_size - 1;
580 __wsum sum = csstate->csum;
581 size_t off = csstate->off;
588 bytes = n = push_pipe(i, bytes, &i_head, &r);
592 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
593 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
594 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
597 i->iov_offset = r + chunk;
610 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
612 if (unlikely(iov_iter_is_pipe(i)))
613 return copy_pipe_to_iter(addr, bytes, i);
614 if (iter_is_iovec(i))
616 iterate_and_advance(i, bytes, v, off,
617 copyout(v.iov_base, addr + off, v.iov_len),
618 memcpy(v.iov_base, addr + off, v.iov_len)
623 EXPORT_SYMBOL(_copy_to_iter);
625 #ifdef CONFIG_ARCH_HAS_COPY_MC
626 static int copyout_mc(void __user *to, const void *from, size_t n)
628 if (access_ok(to, n)) {
629 instrument_copy_to_user(to, from, n);
630 n = copy_mc_to_user((__force void *) to, from, n);
635 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
636 const char *from, size_t len)
641 to = kmap_atomic(page);
642 ret = copy_mc_to_kernel(to + offset, from, len);
648 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
651 struct pipe_inode_info *pipe = i->pipe;
652 unsigned int p_mask = pipe->ring_size - 1;
654 size_t n, off, xfer = 0;
659 bytes = n = push_pipe(i, bytes, &i_head, &off);
663 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
666 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
669 i->iov_offset = off + chunk - rem;
683 * _copy_mc_to_iter - copy to iter with source memory error exception handling
684 * @addr: source kernel address
685 * @bytes: total transfer length
686 * @i: destination iterator
688 * The pmem driver deploys this for the dax operation
689 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
690 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number of bytes
691 * successfully copied.
693 * The main differences between this and the typical _copy_to_iter() are:
695 * * Typical tail/residue handling after a fault retries the copy
696 * byte-by-byte until the fault happens again. Re-triggering machine
697 * checks is potentially fatal so the implementation uses source
698 * alignment and poison alignment assumptions to avoid re-triggering
699 * hardware exceptions.
701 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
702 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
703 * a short copy.
705 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
707 if (unlikely(iov_iter_is_pipe(i)))
708 return copy_mc_pipe_to_iter(addr, bytes, i);
709 if (iter_is_iovec(i))
711 __iterate_and_advance(i, bytes, v, off,
712 copyout_mc(v.iov_base, addr + off, v.iov_len),
713 copy_mc_to_kernel(v.iov_base, addr + off, v.iov_len)
718 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
719 #endif /* CONFIG_ARCH_HAS_COPY_MC */
721 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
723 if (unlikely(iov_iter_is_pipe(i))) {
727 if (iter_is_iovec(i))
729 iterate_and_advance(i, bytes, v, off,
730 copyin(addr + off, v.iov_base, v.iov_len),
731 memcpy(addr + off, v.iov_base, v.iov_len)
736 EXPORT_SYMBOL(_copy_from_iter);
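
/*
 * Editor's sketch, not part of the original file: the write(2)-side mirror
 * image.  The iterator (initialised with WRITE) is the data source; a short
 * return means part of the user buffer was unreadable, and a caller that
 * copied nothing typically fails with -EFAULT.  Names are hypothetical.
 */
static ssize_t example_take_from_user(void *buf, size_t len,
				      struct iov_iter *from)
{
	size_t copied = copy_from_iter(buf, len, from);

	if (!copied && len)
		return -EFAULT;	/* nothing could be copied at all */
	return copied;		/* the iterator was advanced by 'copied' */
}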
738 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
740 if (unlikely(iov_iter_is_pipe(i))) {
744 iterate_and_advance(i, bytes, v, off,
745 __copy_from_user_inatomic_nocache(addr + off,
746 v.iov_base, v.iov_len),
747 memcpy(addr + off, v.iov_base, v.iov_len)
752 EXPORT_SYMBOL(_copy_from_iter_nocache);
754 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
756 * _copy_from_iter_flushcache - write destination through cpu cache
757 * @addr: destination kernel address
758 * @bytes: total transfer length
759 * @i: source iterator
761 * The pmem driver arranges for filesystem-dax to use this facility via
762 * dax_copy_from_iter() for ensuring that writes to persistent memory
763 * are flushed through the CPU cache. It is differentiated from
764 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
765 * all iterator types. By contrast, _copy_from_iter_nocache() only attempts to
766 * bypass the cache for the ITER_IOVEC case, and on some archs may use
767 * instructions that strand dirty data in the cache.
769 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
771 if (unlikely(iov_iter_is_pipe(i))) {
775 iterate_and_advance(i, bytes, v, off,
776 __copy_from_user_flushcache(addr + off, v.iov_base, v.iov_len),
777 memcpy_flushcache(addr + off, v.iov_base, v.iov_len)
782 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
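
/*
 * Editor's sketch, not part of the original file: roughly how a pmem-style
 * dax ->copy_from_iter callback would use the flushcache variant, so stores
 * do not linger dirty in the CPU cache after the copy returns.  The function
 * name and the bare 'pmem_addr' argument are hypothetical simplifications.
 */
static size_t example_dax_copy_from_iter(void *pmem_addr, size_t bytes,
					 struct iov_iter *i)
{
	/* write the destination through the cache for all iterator types */
	return _copy_from_iter_flushcache(pmem_addr, bytes, i);
}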
785 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
788 size_t v = n + offset;
791 * The general case needs to access the page order in order
792 * to compute the page size.
793 * However, we mostly deal with order-0 pages and thus can
794 * avoid a possible cache line miss for requests that fit all
795 * page orders.
797 if (n <= v && v <= PAGE_SIZE)
800 head = compound_head(page);
801 v += (page - head) << PAGE_SHIFT;
803 if (likely(n <= v && v <= (page_size(head))))
809 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
812 if (likely(iter_is_iovec(i)))
813 return copy_page_to_iter_iovec(page, offset, bytes, i);
814 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
815 void *kaddr = kmap_atomic(page);
816 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
817 kunmap_atomic(kaddr);
820 if (iov_iter_is_pipe(i))
821 return copy_page_to_iter_pipe(page, offset, bytes, i);
822 if (unlikely(iov_iter_is_discard(i))) {
823 if (unlikely(i->count < bytes))
832 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
836 if (unlikely(!page_copy_sane(page, offset, bytes)))
838 page += offset / PAGE_SIZE; // first subpage
841 size_t n = __copy_page_to_iter(page, offset,
842 min(bytes, (size_t)PAGE_SIZE - offset), i);
848 if (offset == PAGE_SIZE) {
855 EXPORT_SYMBOL(copy_page_to_iter);
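
/*
 * Editor's sketch, not part of the original file: the classic read-side loop
 * over already-looked-up page cache pages.  Each page is copied into the
 * (READ) iterator at the right in-page offset; a short copy ends the loop.
 * Page lookup, locking and uptodate handling are elided and hypothetical.
 */
static ssize_t example_read_pages(struct page **pages, unsigned long nr_pages,
				  loff_t pos, struct iov_iter *to)
{
	ssize_t done = 0;
	unsigned long n;

	for (n = 0; n < nr_pages && iov_iter_count(to); n++) {
		size_t offset = pos & ~PAGE_MASK;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(to));
		size_t copied = copy_page_to_iter(pages[n], offset, bytes, to);

		done += copied;
		pos += copied;
		if (copied < bytes)
			break;	/* destination faulted or ran out of space */
	}
	return done;
}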
857 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
860 if (unlikely(!page_copy_sane(page, offset, bytes)))
862 if (likely(iter_is_iovec(i)))
863 return copy_page_from_iter_iovec(page, offset, bytes, i);
864 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
865 void *kaddr = kmap_atomic(page);
866 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
867 kunmap_atomic(kaddr);
873 EXPORT_SYMBOL(copy_page_from_iter);
875 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
877 struct pipe_inode_info *pipe = i->pipe;
878 unsigned int p_mask = pipe->ring_size - 1;
885 bytes = n = push_pipe(i, bytes, &i_head, &off);
890 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
891 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
893 i->iov_offset = off + chunk;
902 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
904 if (unlikely(iov_iter_is_pipe(i)))
905 return pipe_zero(bytes, i);
906 iterate_and_advance(i, bytes, v, count,
907 clear_user(v.iov_base, v.iov_len),
908 memset(v.iov_base, 0, v.iov_len)
913 EXPORT_SYMBOL(iov_iter_zero);
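
/*
 * Editor's sketch, not part of the original file: zero-filling a hole in a
 * read path instead of copying from a (non-existent) page, then reporting
 * -EFAULT only if nothing could be zeroed.  The helper name is hypothetical.
 */
static ssize_t example_read_hole(size_t bytes, struct iov_iter *to)
{
	size_t zeroed = iov_iter_zero(bytes, to);

	if (!zeroed && bytes)
		return -EFAULT;		/* user destination is unwritable */
	return zeroed;
}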
915 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
918 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
919 if (unlikely(!page_copy_sane(page, offset, bytes))) {
920 kunmap_atomic(kaddr);
923 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
924 kunmap_atomic(kaddr);
928 iterate_and_advance(i, bytes, v, off,
929 copyin(p + off, v.iov_base, v.iov_len),
930 memcpy(p + off, v.iov_base, v.iov_len)
932 kunmap_atomic(kaddr);
935 EXPORT_SYMBOL(copy_page_from_iter_atomic);
937 static inline void pipe_truncate(struct iov_iter *i)
939 struct pipe_inode_info *pipe = i->pipe;
940 unsigned int p_tail = pipe->tail;
941 unsigned int p_head = pipe->head;
942 unsigned int p_mask = pipe->ring_size - 1;
944 if (!pipe_empty(p_head, p_tail)) {
945 struct pipe_buffer *buf;
946 unsigned int i_head = i->head;
947 size_t off = i->iov_offset;
950 buf = &pipe->bufs[i_head & p_mask];
951 buf->len = off - buf->offset;
954 while (p_head != i_head) {
956 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
963 static void pipe_advance(struct iov_iter *i, size_t size)
965 struct pipe_inode_info *pipe = i->pipe;
967 struct pipe_buffer *buf;
968 unsigned int p_mask = pipe->ring_size - 1;
969 unsigned int i_head = i->head;
970 size_t off = i->iov_offset, left = size;
972 if (off) /* make it relative to the beginning of buffer */
973 left += off - pipe->bufs[i_head & p_mask].offset;
975 buf = &pipe->bufs[i_head & p_mask];
976 if (left <= buf->len)
982 i->iov_offset = buf->offset + left;
985 /* ... and discard everything past that point */
989 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
993 bi.bi_size = i->count;
994 bi.bi_bvec_done = i->iov_offset;
996 bvec_iter_advance(i->bvec, &bi, size);
998 i->bvec += bi.bi_idx;
999 i->nr_segs -= bi.bi_idx;
1000 i->count = bi.bi_size;
1001 i->iov_offset = bi.bi_bvec_done;
1004 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
1006 const struct iovec *iov, *end;
1012 size += i->iov_offset; // from beginning of current segment
1013 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
1014 if (likely(size < iov->iov_len))
1016 size -= iov->iov_len;
1018 i->iov_offset = size;
1019 i->nr_segs -= iov - i->iov;
1023 void iov_iter_advance(struct iov_iter *i, size_t size)
1025 if (unlikely(i->count < size))
1027 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
1028 /* iovec and kvec have identical layouts */
1029 iov_iter_iovec_advance(i, size);
1030 } else if (iov_iter_is_bvec(i)) {
1031 iov_iter_bvec_advance(i, size);
1032 } else if (iov_iter_is_pipe(i)) {
1033 pipe_advance(i, size);
1034 } else if (unlikely(iov_iter_is_xarray(i))) {
1035 i->iov_offset += size;
1037 } else if (iov_iter_is_discard(i)) {
1041 EXPORT_SYMBOL(iov_iter_advance);
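
/*
 * Editor's sketch, not part of the original file: skipping over a header the
 * caller does not want to copy, e.g. after some other path already consumed
 * it.  Advancing copies nothing; it only moves the segment/offset cursor and
 * shrinks i->count.  The helper and 'hdr_len' are hypothetical.
 */
static void example_skip_header(struct iov_iter *i, size_t hdr_len)
{
	if (iov_iter_count(i) >= hdr_len)
		iov_iter_advance(i, hdr_len);
}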
1043 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1047 if (WARN_ON(unroll > MAX_RW_COUNT))
1050 if (unlikely(iov_iter_is_pipe(i))) {
1051 struct pipe_inode_info *pipe = i->pipe;
1052 unsigned int p_mask = pipe->ring_size - 1;
1053 unsigned int i_head = i->head;
1054 size_t off = i->iov_offset;
1056 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1057 size_t n = off - b->offset;
1063 if (!unroll && i_head == i->start_head) {
1068 b = &pipe->bufs[i_head & p_mask];
1069 off = b->offset + b->len;
1071 i->iov_offset = off;
1076 if (unlikely(iov_iter_is_discard(i)))
1078 if (unroll <= i->iov_offset) {
1079 i->iov_offset -= unroll;
1082 unroll -= i->iov_offset;
1083 if (iov_iter_is_xarray(i)) {
1084 BUG(); /* We should never go beyond the start of the specified
1085 * range since we might then be straying into pages that
1088 } else if (iov_iter_is_bvec(i)) {
1089 const struct bio_vec *bvec = i->bvec;
1091 size_t n = (--bvec)->bv_len;
1095 i->iov_offset = n - unroll;
1100 } else { /* same logics for iovec and kvec */
1101 const struct iovec *iov = i->iov;
1103 size_t n = (--iov)->iov_len;
1107 i->iov_offset = n - unroll;
1114 EXPORT_SYMBOL(iov_iter_revert);
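
/*
 * Editor's sketch, not part of the original file: the "consume then undo"
 * pattern.  If the lower layer rejects the data after the iterator has
 * already been advanced, the cursor is wound back by however much was
 * consumed so the caller can retry or fall back.  Names are hypothetical.
 */
static ssize_t example_try_send(struct iov_iter *from,
				ssize_t (*send)(struct iov_iter *))
{
	size_t before = iov_iter_count(from);
	ssize_t ret = send(from);

	if (ret < 0)
		iov_iter_revert(from, before - iov_iter_count(from));
	return ret;
}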
1117 * Return the count of just the current iov_iter segment.
1119 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1121 if (i->nr_segs > 1) {
1122 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1123 return min(i->count, i->iov->iov_len - i->iov_offset);
1124 if (iov_iter_is_bvec(i))
1125 return min(i->count, i->bvec->bv_len - i->iov_offset);
1129 EXPORT_SYMBOL(iov_iter_single_seg_count);
1131 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1132 const struct kvec *kvec, unsigned long nr_segs,
1135 WARN_ON(direction & ~(READ | WRITE));
1136 *i = (struct iov_iter){
1137 .iter_type = ITER_KVEC,
1138 .data_source = direction,
1145 EXPORT_SYMBOL(iov_iter_kvec);
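
/*
 * Editor's sketch, not part of the original file: wrapping a plain kernel
 * buffer in a single-segment ITER_KVEC so it can be handed to an
 * iterator-based consumer (a ->read_iter/->write_iter style interface).
 * The helper name is hypothetical.
 */
static void example_kvec_iter(struct iov_iter *i, struct kvec *kv,
			      void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	iov_iter_kvec(i, WRITE, kv, 1, len);	/* buffer is the data source */
}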
1147 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1148 const struct bio_vec *bvec, unsigned long nr_segs,
1151 WARN_ON(direction & ~(READ | WRITE));
1152 *i = (struct iov_iter){
1153 .iter_type = ITER_BVEC,
1154 .data_source = direction,
1161 EXPORT_SYMBOL(iov_iter_bvec);
1163 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1164 struct pipe_inode_info *pipe,
1167 BUG_ON(direction != READ);
1168 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1169 *i = (struct iov_iter){
1170 .iter_type = ITER_PIPE,
1171 .data_source = false,
1174 .start_head = pipe->head,
1179 EXPORT_SYMBOL(iov_iter_pipe);
1182 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1183 * @i: The iterator to initialise.
1184 * @direction: The direction of the transfer.
1185 * @xarray: The xarray to access.
1186 * @start: The start file position.
1187 * @count: The size of the I/O buffer in bytes.
1189 * Set up an I/O iterator to either draw data out of the pages attached to an
1190 * inode or to inject data into those pages. The pages *must* be prevented
1191 * from evaporation, either by taking a ref on them or locking them by the
1192 * caller.
1194 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1195 struct xarray *xarray, loff_t start, size_t count)
1197 BUG_ON(direction & ~1);
1198 *i = (struct iov_iter) {
1199 .iter_type = ITER_XARRAY,
1200 .data_source = direction,
1202 .xarray_start = start,
1207 EXPORT_SYMBOL(iov_iter_xarray);
1210 * iov_iter_discard - Initialise an I/O iterator that discards data
1211 * @i: The iterator to initialise.
1212 * @direction: The direction of the transfer.
1213 * @count: The size of the I/O buffer in bytes.
1215 * Set up an I/O iterator that just discards everything that's written to it.
1216 * It's only available as a READ iterator.
1218 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1220 BUG_ON(direction != READ);
1221 *i = (struct iov_iter){
1222 .iter_type = ITER_DISCARD,
1223 .data_source = false,
1228 EXPORT_SYMBOL(iov_iter_discard);
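
/*
 * Editor's sketch, not part of the original file: draining bytes a caller
 * wants to throw away (only the READ direction is supported).  Data "copied"
 * to a discard iterator disappears, but the count accounting still advances,
 * which is what skb-draining style callers rely on.  The name is hypothetical.
 */
static size_t example_drain(const void *buf, size_t len)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	return copy_to_iter(buf, len, &sink);	/* always copies 'len' */
}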
1230 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1232 unsigned long res = 0;
1233 size_t size = i->count;
1234 size_t skip = i->iov_offset;
1237 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1238 size_t len = i->iov[k].iov_len - skip;
1240 res |= (unsigned long)i->iov[k].iov_base + skip;
1252 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1255 size_t size = i->count;
1256 unsigned skip = i->iov_offset;
1259 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1260 size_t len = i->bvec[k].bv_len - skip;
1261 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1272 unsigned long iov_iter_alignment(const struct iov_iter *i)
1274 /* iovec and kvec have identical layouts */
1275 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1276 return iov_iter_alignment_iovec(i);
1278 if (iov_iter_is_bvec(i))
1279 return iov_iter_alignment_bvec(i);
1281 if (iov_iter_is_pipe(i)) {
1282 unsigned int p_mask = i->pipe->ring_size - 1;
1283 size_t size = i->count;
1285 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1286 return size | i->iov_offset;
1290 if (iov_iter_is_xarray(i))
1291 return (i->xarray_start + i->iov_offset) | i->count;
1295 EXPORT_SYMBOL(iov_iter_alignment);
1297 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1299 unsigned long res = 0;
1300 unsigned long v = 0;
1301 size_t size = i->count;
1304 if (WARN_ON(!iter_is_iovec(i)))
1307 for (k = 0; k < i->nr_segs; k++) {
1308 if (i->iov[k].iov_len) {
1309 unsigned long base = (unsigned long)i->iov[k].iov_base;
1310 if (v) // if not the first one
1311 res |= base | v; // this start | previous end
1312 v = base + i->iov[k].iov_len;
1313 if (size <= i->iov[k].iov_len)
1315 size -= i->iov[k].iov_len;
1320 EXPORT_SYMBOL(iov_iter_gap_alignment);
1322 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1324 struct page **pages,
1328 struct pipe_inode_info *pipe = i->pipe;
1329 unsigned int p_mask = pipe->ring_size - 1;
1330 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1337 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1345 static ssize_t pipe_get_pages(struct iov_iter *i,
1346 struct page **pages, size_t maxsize, unsigned maxpages,
1349 unsigned int iter_head, npages;
1355 data_start(i, &iter_head, start);
1356 /* Amount of free space: some of this one + all after this one */
1357 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1358 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1360 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1363 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1364 pgoff_t index, unsigned int nr_pages)
1366 XA_STATE(xas, xa, index);
1368 unsigned int ret = 0;
1371 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1372 if (xas_retry(&xas, page))
1375 /* Has the page moved or been split? */
1376 if (unlikely(page != xas_reload(&xas))) {
1381 pages[ret] = find_subpage(page, xas.xa_index);
1382 get_page(pages[ret]);
1383 if (++ret == nr_pages)
1390 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1391 struct page **pages, size_t maxsize,
1392 unsigned maxpages, size_t *_start_offset)
1394 unsigned nr, offset;
1395 pgoff_t index, count;
1396 size_t size = maxsize, actual;
1399 if (!size || !maxpages)
1402 pos = i->xarray_start + i->iov_offset;
1403 index = pos >> PAGE_SHIFT;
1404 offset = pos & ~PAGE_MASK;
1405 *_start_offset = offset;
1408 if (size > PAGE_SIZE - offset) {
1409 size -= PAGE_SIZE - offset;
1410 count += size >> PAGE_SHIFT;
1416 if (count > maxpages)
1419 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1423 actual = PAGE_SIZE * nr;
1425 if (nr == count && size > 0) {
1426 unsigned last_offset = (nr > 1) ? 0 : offset;
1427 actual -= PAGE_SIZE - (last_offset + size);
1432 /* must only be called on a non-empty ITER_IOVEC */
1433 static unsigned long first_iovec_segment(const struct iov_iter *i,
1434 size_t *size, size_t *start,
1435 size_t maxsize, unsigned maxpages)
1440 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1441 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1442 size_t len = i->iov[k].iov_len - skip;
1448 len += (*start = addr % PAGE_SIZE);
1449 if (len > maxpages * PAGE_SIZE)
1450 len = maxpages * PAGE_SIZE;
1452 return addr & PAGE_MASK;
1454 BUG(); // if it had been empty, we wouldn't get called
1457 /* must only be called on a non-empty ITER_BVEC */
1458 static struct page *first_bvec_segment(const struct iov_iter *i,
1459 size_t *size, size_t *start,
1460 size_t maxsize, unsigned maxpages)
1463 size_t skip = i->iov_offset, len;
1465 len = i->bvec->bv_len - skip;
1468 skip += i->bvec->bv_offset;
1469 page = i->bvec->bv_page + skip / PAGE_SIZE;
1470 len += (*start = skip % PAGE_SIZE);
1471 if (len > maxpages * PAGE_SIZE)
1472 len = maxpages * PAGE_SIZE;
1477 ssize_t iov_iter_get_pages(struct iov_iter *i,
1478 struct page **pages, size_t maxsize, unsigned maxpages,
1484 if (maxsize > i->count)
1489 if (likely(iter_is_iovec(i))) {
1492 addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
1493 n = DIV_ROUND_UP(len, PAGE_SIZE);
1494 res = get_user_pages_fast(addr, n,
1495 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1497 if (unlikely(res < 0))
1499 return (res == n ? len : res * PAGE_SIZE) - *start;
1501 if (iov_iter_is_bvec(i)) {
1504 page = first_bvec_segment(i, &len, start, maxsize, maxpages);
1505 n = DIV_ROUND_UP(len, PAGE_SIZE);
1507 get_page(*pages++ = page++);
1508 return len - *start;
1510 if (iov_iter_is_pipe(i))
1511 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1512 if (iov_iter_is_xarray(i))
1513 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1516 EXPORT_SYMBOL(iov_iter_get_pages);
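
/*
 * Editor's sketch, not part of the original file: pinning the pages behind
 * the first contiguous run of an iterator (e.g. to build a bio or an sg
 * list), then dropping the references once the I/O completes.  Partial-pin
 * error handling is simplified; helper names are hypothetical.
 */
static ssize_t example_pin_first_run(struct iov_iter *i, struct page **pages,
				     unsigned maxpages, size_t *offset)
{
	ssize_t bytes = iov_iter_get_pages(i, pages, maxpages * PAGE_SIZE,
					   maxpages, offset);

	if (bytes > 0)		/* iov_iter_get_pages() does not advance */
		iov_iter_advance(i, bytes);
	return bytes;
}

static void example_unpin(struct page **pages, unsigned npages)
{
	while (npages--)
		put_page(pages[npages]);
}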
1518 static struct page **get_pages_array(size_t n)
1520 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1523 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1524 struct page ***pages, size_t maxsize,
1528 unsigned int iter_head, npages;
1534 data_start(i, &iter_head, start);
1535 /* Amount of free space: some of this one + all after this one */
1536 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1537 n = npages * PAGE_SIZE - *start;
1541 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1542 p = get_pages_array(npages);
1545 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1553 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1554 struct page ***pages, size_t maxsize,
1555 size_t *_start_offset)
1558 unsigned nr, offset;
1559 pgoff_t index, count;
1560 size_t size = maxsize, actual;
1566 pos = i->xarray_start + i->iov_offset;
1567 index = pos >> PAGE_SHIFT;
1568 offset = pos & ~PAGE_MASK;
1569 *_start_offset = offset;
1572 if (size > PAGE_SIZE - offset) {
1573 size -= PAGE_SIZE - offset;
1574 count += size >> PAGE_SHIFT;
1580 p = get_pages_array(count);
1585 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1589 actual = PAGE_SIZE * nr;
1591 if (nr == count && size > 0) {
1592 unsigned last_offset = (nr > 1) ? 0 : offset;
1593 actual -= PAGE_SIZE - (last_offset + size);
1598 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1599 struct page ***pages, size_t maxsize,
1606 if (maxsize > i->count)
1611 if (likely(iter_is_iovec(i))) {
1614 addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
1615 n = DIV_ROUND_UP(len, PAGE_SIZE);
1616 p = get_pages_array(n);
1619 res = get_user_pages_fast(addr, n,
1620 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1621 if (unlikely(res < 0)) {
1626 return (res == n ? len : res * PAGE_SIZE) - *start;
1628 if (iov_iter_is_bvec(i)) {
1631 page = first_bvec_segment(i, &len, start, maxsize, ~0U);
1632 n = DIV_ROUND_UP(len, PAGE_SIZE);
1633 *pages = p = get_pages_array(n);
1637 get_page(*p++ = page++);
1638 return len - *start;
1640 if (iov_iter_is_pipe(i))
1641 return pipe_get_pages_alloc(i, pages, maxsize, start);
1642 if (iov_iter_is_xarray(i))
1643 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1646 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1648 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1653 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1657 iterate_and_advance(i, bytes, v, off, ({
1658 next = csum_and_copy_from_user(v.iov_base,
1662 sum = csum_block_add(sum, next, off);
1663 next ? 0 : v.iov_len;
1665 sum = csum_and_memcpy(addr + off, v.iov_base, v.iov_len,
1672 EXPORT_SYMBOL(csum_and_copy_from_iter);
1674 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1677 struct csum_state *csstate = _csstate;
1680 if (unlikely(iov_iter_is_pipe(i)))
1681 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1683 sum = csum_shift(csstate->csum, csstate->off);
1684 if (unlikely(iov_iter_is_discard(i))) {
1685 WARN_ON(1); /* for now */
1688 iterate_and_advance(i, bytes, v, off, ({
1689 next = csum_and_copy_to_user(addr + off,
1693 sum = csum_block_add(sum, next, off);
1694 next ? 0 : v.iov_len;
1696 sum = csum_and_memcpy(v.iov_base,
1698 v.iov_len, sum, off);
1701 csstate->csum = csum_shift(sum, csstate->off);
1702 csstate->off += bytes;
1705 EXPORT_SYMBOL(csum_and_copy_to_iter);
1707 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1710 #ifdef CONFIG_CRYPTO_HASH
1711 struct ahash_request *hash = hashp;
1712 struct scatterlist sg;
1715 copied = copy_to_iter(addr, bytes, i);
1716 sg_init_one(&sg, addr, copied);
1717 ahash_request_set_crypt(hash, &sg, NULL, copied);
1718 crypto_ahash_update(hash);
1724 EXPORT_SYMBOL(hash_and_copy_to_iter);
1726 static int iov_npages(const struct iov_iter *i, int maxpages)
1728 size_t skip = i->iov_offset, size = i->count;
1729 const struct iovec *p;
1732 for (p = i->iov; size; skip = 0, p++) {
1733 unsigned offs = offset_in_page(p->iov_base + skip);
1734 size_t len = min(p->iov_len - skip, size);
1738 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1739 if (unlikely(npages > maxpages))
1746 static int bvec_npages(const struct iov_iter *i, int maxpages)
1748 size_t skip = i->iov_offset, size = i->count;
1749 const struct bio_vec *p;
1752 for (p = i->bvec; size; skip = 0, p++) {
1753 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1754 size_t len = min(p->bv_len - skip, size);
1757 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1758 if (unlikely(npages > maxpages))
1764 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1766 if (unlikely(!i->count))
1768 /* iovec and kvec have identical layouts */
1769 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1770 return iov_npages(i, maxpages);
1771 if (iov_iter_is_bvec(i))
1772 return bvec_npages(i, maxpages);
1773 if (iov_iter_is_pipe(i)) {
1774 unsigned int iter_head;
1781 data_start(i, &iter_head, &off);
1782 /* some of this one + all after this one */
1783 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1784 return min(npages, maxpages);
1786 if (iov_iter_is_xarray(i)) {
1787 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1788 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1789 return min(npages, maxpages);
1793 EXPORT_SYMBOL(iov_iter_npages);
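
/*
 * Editor's sketch, not part of the original file: sizing a page array before
 * pinning, the usual pairing with iov_iter_get_pages().  The helper name and
 * the caller-supplied 'maxpages' cap are hypothetical.
 */
static struct page **example_alloc_page_array(const struct iov_iter *i,
					      int maxpages, int *npages)
{
	*npages = iov_iter_npages(i, maxpages);
	if (!*npages)
		return NULL;
	return kcalloc(*npages, sizeof(struct page *), GFP_KERNEL);
}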
1795 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1798 if (unlikely(iov_iter_is_pipe(new))) {
1802 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1804 if (iov_iter_is_bvec(new))
1805 return new->bvec = kmemdup(new->bvec,
1806 new->nr_segs * sizeof(struct bio_vec),
1809 /* iovec and kvec have identical layout */
1810 return new->iov = kmemdup(new->iov,
1811 new->nr_segs * sizeof(struct iovec),
1814 EXPORT_SYMBOL(dup_iter);
1816 static int copy_compat_iovec_from_user(struct iovec *iov,
1817 const struct iovec __user *uvec, unsigned long nr_segs)
1819 const struct compat_iovec __user *uiov =
1820 (const struct compat_iovec __user *)uvec;
1821 int ret = -EFAULT, i;
1823 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1826 for (i = 0; i < nr_segs; i++) {
1830 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1831 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1833 /* check for compat_size_t not fitting in compat_ssize_t ... */
1838 iov[i].iov_base = compat_ptr(buf);
1839 iov[i].iov_len = len;
1848 static int copy_iovec_from_user(struct iovec *iov,
1849 const struct iovec __user *uvec, unsigned long nr_segs)
1853 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1855 for (seg = 0; seg < nr_segs; seg++) {
1856 if ((ssize_t)iov[seg].iov_len < 0)
1863 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1864 unsigned long nr_segs, unsigned long fast_segs,
1865 struct iovec *fast_iov, bool compat)
1867 struct iovec *iov = fast_iov;
1871 * SuS says "The readv() function *may* fail if the iovcnt argument was
1872 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1873 * traditionally returned zero for zero segments, so...
1877 if (nr_segs > UIO_MAXIOV)
1878 return ERR_PTR(-EINVAL);
1879 if (nr_segs > fast_segs) {
1880 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1882 return ERR_PTR(-ENOMEM);
1886 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1888 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1890 if (iov != fast_iov)
1892 return ERR_PTR(ret);
1898 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1899 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1900 struct iov_iter *i, bool compat)
1902 ssize_t total_len = 0;
1906 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1909 return PTR_ERR(iov);
1913 * According to the Single Unix Specification we should return EINVAL if
1914 * an element length is < 0 when cast to ssize_t or if the total length
1915 * would overflow the ssize_t return value of the system call.
1917 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1918 * overflow case.
1920 for (seg = 0; seg < nr_segs; seg++) {
1921 ssize_t len = (ssize_t)iov[seg].iov_len;
1923 if (!access_ok(iov[seg].iov_base, len)) {
1930 if (len > MAX_RW_COUNT - total_len) {
1931 len = MAX_RW_COUNT - total_len;
1932 iov[seg].iov_len = len;
1937 iov_iter_init(i, type, iov, nr_segs, total_len);
1946 * import_iovec() - Copy an array of &struct iovec from userspace
1947 * into the kernel, check that it is valid, and initialize a new
1948 * &struct iov_iter iterator to access it.
1950 * @type: One of %READ or %WRITE.
1951 * @uvec: Pointer to the userspace array.
1952 * @nr_segs: Number of elements in userspace array.
1953 * @fast_segs: Number of elements in *@iovp.
1954 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1955 * on-stack) kernel array.
1956 * @i: Pointer to iterator that will be initialized on success.
1958 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1959 * then this function places %NULL in *@iovp on return. Otherwise, a new
1960 * array will be allocated and the result placed in *@iovp. This means that
1961 * the caller may call kfree() on *@iovp regardless of whether the small
1962 * on-stack array was used or not (and regardless of whether this function
1963 * returns an error or not).
1965 * Return: Negative error code on error, bytes imported on success
1967 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1968 unsigned nr_segs, unsigned fast_segs,
1969 struct iovec **iovp, struct iov_iter *i)
1971 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1972 in_compat_syscall());
1974 EXPORT_SYMBOL(import_iovec);
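
/*
 * Editor's sketch, not part of the original file: the canonical syscall-side
 * usage, as in vfs readv/writev.  A small on-stack array covers the common
 * case; import_iovec() decides whether to allocate, and kfree(iov) is always
 * safe afterwards.  'example_do_write' and the wrapper are hypothetical.
 */
static ssize_t example_writev(const struct iovec __user *uvec,
			      unsigned long nr_segs,
			      ssize_t (*example_do_write)(struct iov_iter *))
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = example_do_write(&iter);
	kfree(iov);		/* NULL when the on-stack array was used */
	return ret;
}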
1976 int import_single_range(int rw, void __user *buf, size_t len,
1977 struct iovec *iov, struct iov_iter *i)
1979 if (len > MAX_RW_COUNT)
1981 if (unlikely(!access_ok(buf, len)))
1984 iov->iov_base = buf;
1986 iov_iter_init(i, rw, iov, 1, len);
1989 EXPORT_SYMBOL(import_single_range);
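
/*
 * Editor's sketch, not part of the original file: the single-buffer analogue
 * used by plain read(2)/write(2) style paths.  One struct iovec lives on the
 * caller's stack and the iterator is initialised over it.  'example_do_write'
 * and the wrapper are hypothetical.
 */
static ssize_t example_write_buf(void __user *buf, size_t len,
				 ssize_t (*example_do_write)(struct iov_iter *))
{
	struct iovec iov;
	struct iov_iter iter;
	int ret = import_single_range(WRITE, buf, len, &iov, &iter);

	return ret ? ret : example_do_write(&iter);
}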