1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 /* covers iovec and kvec alike */
20 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
22 size_t skip = i->iov_offset; \
24 len = min(n, __p->iov_len - skip); \
26 base = __p->iov_base + skip; \
31 if (skip < __p->iov_len) \
37 i->iov_offset = skip; \
41 #define iterate_bvec(i, n, base, len, off, p, STEP) { \
43 unsigned skip = i->iov_offset; \
45 unsigned offset = p->bv_offset + skip; \
47 void *kaddr = kmap_local_page(p->bv_page + \
48 offset / PAGE_SIZE); \
49 base = kaddr + offset % PAGE_SIZE; \
50 len = min(min(n, (size_t)(p->bv_len - skip)), \
51 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
53 kunmap_local(kaddr); \
57 if (skip == p->bv_len) { \
65 i->iov_offset = skip; \
69 #define iterate_xarray(i, n, base, len, __off, STEP) { \
72 struct folio *folio; \
73 loff_t start = i->xarray_start + i->iov_offset; \
74 pgoff_t index = start / PAGE_SIZE; \
75 XA_STATE(xas, i->xarray, index); \
77 len = PAGE_SIZE - offset_in_page(start); \
79 xas_for_each(&xas, folio, ULONG_MAX) { \
82 if (xas_retry(&xas, folio)) \
84 if (WARN_ON(xa_is_value(folio))) \
86 if (WARN_ON(folio_test_hugetlb(folio))) \
88 offset = offset_in_folio(folio, start + __off); \
89 while (offset < folio_size(folio)) { \
90 base = kmap_local_folio(folio, offset); \
105 i->iov_offset += __off; \
109 #define __iterate_and_advance(i, n, base, len, off, I, K) { \
110 if (unlikely(i->count < n)) \
113 if (likely(iter_is_iovec(i))) { \
114 const struct iovec *iov = i->iov; \
117 iterate_iovec(i, n, base, len, off, \
119 i->nr_segs -= iov - i->iov; \
121 } else if (iov_iter_is_bvec(i)) { \
122 const struct bio_vec *bvec = i->bvec; \
125 iterate_bvec(i, n, base, len, off, \
127 i->nr_segs -= bvec - i->bvec; \
129 } else if (iov_iter_is_kvec(i)) { \
130 const struct kvec *kvec = i->kvec; \
133 iterate_iovec(i, n, base, len, off, \
135 i->nr_segs -= kvec - i->kvec; \
137 } else if (iov_iter_is_xarray(i)) { \
140 iterate_xarray(i, n, base, len, off, \
146 #define iterate_and_advance(i, n, base, len, off, I, K) \
147 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
149 static int copyout(void __user *to, const void *from, size_t n)
151 if (should_fail_usercopy())
153 if (access_ok(to, n)) {
154 instrument_copy_to_user(to, from, n);
155 n = raw_copy_to_user(to, from, n);
160 static int copyin(void *to, const void __user *from, size_t n)
162 if (should_fail_usercopy())
164 if (access_ok(from, n)) {
165 instrument_copy_from_user(to, from, n);
166 n = raw_copy_from_user(to, from, n);
171 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
174 size_t skip, copy, left, wanted;
175 const struct iovec *iov;
179 if (unlikely(bytes > i->count))
182 if (unlikely(!bytes))
188 skip = i->iov_offset;
189 buf = iov->iov_base + skip;
190 copy = min(bytes, iov->iov_len - skip);
192 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
193 kaddr = kmap_atomic(page);
194 from = kaddr + offset;
196 /* first chunk, usually the only one */
197 left = copyout(buf, from, copy);
203 while (unlikely(!left && bytes)) {
206 copy = min(bytes, iov->iov_len);
207 left = copyout(buf, from, copy);
213 if (likely(!bytes)) {
214 kunmap_atomic(kaddr);
217 offset = from - kaddr;
219 kunmap_atomic(kaddr);
220 copy = min(bytes, iov->iov_len - skip);
222 /* Too bad - revert to non-atomic kmap */
225 from = kaddr + offset;
226 left = copyout(buf, from, copy);
231 while (unlikely(!left && bytes)) {
234 copy = min(bytes, iov->iov_len);
235 left = copyout(buf, from, copy);
244 if (skip == iov->iov_len) {
248 i->count -= wanted - bytes;
249 i->nr_segs -= iov - i->iov;
251 i->iov_offset = skip;
252 return wanted - bytes;
255 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
258 size_t skip, copy, left, wanted;
259 const struct iovec *iov;
263 if (unlikely(bytes > i->count))
266 if (unlikely(!bytes))
272 skip = i->iov_offset;
273 buf = iov->iov_base + skip;
274 copy = min(bytes, iov->iov_len - skip);
276 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
277 kaddr = kmap_atomic(page);
280 /* first chunk, usually the only one */
281 left = copyin(to, buf, copy);
287 while (unlikely(!left && bytes)) {
290 copy = min(bytes, iov->iov_len);
291 left = copyin(to, buf, copy);
297 if (likely(!bytes)) {
298 kunmap_atomic(kaddr);
303 kunmap_atomic(kaddr);
304 copy = min(bytes, iov->iov_len - skip);
306 /* Too bad - revert to non-atomic kmap */
310 left = copyin(to, buf, copy);
315 while (unlikely(!left && bytes)) {
318 copy = min(bytes, iov->iov_len);
319 left = copyin(to, buf, copy);
328 if (skip == iov->iov_len) {
332 i->count -= wanted - bytes;
333 i->nr_segs -= iov - i->iov;
335 i->iov_offset = skip;
336 return wanted - bytes;
340 static bool sanity(const struct iov_iter *i)
342 struct pipe_inode_info *pipe = i->pipe;
343 unsigned int p_head = pipe->head;
344 unsigned int p_tail = pipe->tail;
345 unsigned int p_mask = pipe->ring_size - 1;
346 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
347 unsigned int i_head = i->head;
351 struct pipe_buffer *p;
352 if (unlikely(p_occupancy == 0))
353 goto Bad; // pipe must be non-empty
354 if (unlikely(i_head != p_head - 1))
355 goto Bad; // must be at the last buffer...
357 p = &pipe->bufs[i_head & p_mask];
358 if (unlikely(p->offset + p->len != i->iov_offset))
359 goto Bad; // ... at the end of segment
361 if (i_head != p_head)
362 goto Bad; // must be right after the last buffer
366 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
367 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
368 p_head, p_tail, pipe->ring_size);
369 for (idx = 0; idx < pipe->ring_size; idx++)
370 printk(KERN_ERR "[%p %p %d %d]\n",
372 pipe->bufs[idx].page,
373 pipe->bufs[idx].offset,
374 pipe->bufs[idx].len);
379 #define sanity(i) true
382 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
385 struct pipe_inode_info *pipe = i->pipe;
386 struct pipe_buffer *buf;
387 unsigned int p_tail = pipe->tail;
388 unsigned int p_mask = pipe->ring_size - 1;
389 unsigned int i_head = i->head;
392 if (unlikely(bytes > i->count))
395 if (unlikely(!bytes))
402 buf = &pipe->bufs[i_head & p_mask];
404 if (offset == off && buf->page == page) {
405 /* merge with the last one */
407 i->iov_offset += bytes;
411 buf = &pipe->bufs[i_head & p_mask];
413 if (pipe_full(i_head, p_tail, pipe->max_usage))
416 buf->ops = &page_cache_pipe_buf_ops;
419 buf->offset = offset;
422 pipe->head = i_head + 1;
423 i->iov_offset = offset + bytes;
431 * fault_in_iov_iter_readable - fault in iov iterator for reading
433 * @size: maximum length
435 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
436 * @size. For each iovec, fault in each page that constitutes the iovec.
438 * Returns the number of bytes not faulted in (like copy_to_user() and copy_from_user()).
441 * Always returns 0 for non-userspace iterators.
443 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
445 if (iter_is_iovec(i)) {
446 size_t count = min(size, iov_iter_count(i));
447 const struct iovec *p;
451 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
452 size_t len = min(count, p->iov_len - skip);
457 ret = fault_in_readable(p->iov_base + skip, len);
466 EXPORT_SYMBOL(fault_in_iov_iter_readable);
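/*
 * Illustrative sketch: the usual caller pattern for
 * fault_in_iov_iter_readable(). Buffered-write style paths prefault the
 * user pages before taking locks under which the copy itself must not
 * fault. The example_* name is hypothetical, not part of this file.
 */
static __maybe_unused size_t example_prefault_then_copy(void *dst, size_t len,
							struct iov_iter *from)
{
	size_t copied = 0;

	while (copied < len) {
		size_t want = len - copied;
		size_t n;

		/* returns how many bytes could NOT be faulted in */
		if (fault_in_iov_iter_readable(from, want) == want)
			break;			/* nothing is accessible */
		/*
		 * The copy can still be short if a page was reclaimed after
		 * the prefault; loop and retry the remainder.
		 */
		n = copy_from_iter(dst + copied, want, from);
		if (!n)
			break;			/* no forward progress */
		copied += n;
	}
	return copied;
}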
469 * fault_in_iov_iter_writeable - fault in iov iterator for writing
471 * @size: maximum length
473 * Faults in the iterator using get_user_pages(), i.e., without triggering
474 * hardware page faults. This is primarily useful when we already know that
475 * some or all of the pages in @i aren't in memory.
477 * Returns the number of bytes not faulted in, like copy_to_user() and copy_from_user().
480 * Always returns 0 for non-user-space iterators.
482 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
484 if (iter_is_iovec(i)) {
485 size_t count = min(size, iov_iter_count(i));
486 const struct iovec *p;
490 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
491 size_t len = min(count, p->iov_len - skip);
496 ret = fault_in_safe_writeable(p->iov_base + skip, len);
505 EXPORT_SYMBOL(fault_in_iov_iter_writeable);
507 void iov_iter_init(struct iov_iter *i, unsigned int direction,
508 const struct iovec *iov, unsigned long nr_segs,
511 WARN_ON(direction & ~(READ | WRITE));
512 *i = (struct iov_iter) {
513 .iter_type = ITER_IOVEC,
515 .data_source = direction,
522 EXPORT_SYMBOL(iov_iter_init);
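/*
 * Illustrative sketch: wrapping a single user buffer in an ITER_IOVEC
 * iterator. READ means the kernel will be storing into the user memory
 * (a read(2)-style path); WRITE means it will be fetching from it. The
 * iovec must stay valid for as long as the iterator is used. The
 * example_* name is hypothetical.
 */
static __maybe_unused void example_user_read_iter(struct iov_iter *iter,
						  struct iovec *iov,
						  void __user *ubuf, size_t len)
{
	iov->iov_base = ubuf;
	iov->iov_len = len;
	iov_iter_init(iter, READ, iov, 1, len);	/* data flows kernel -> user */
}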
524 static inline bool allocated(struct pipe_buffer *buf)
526 return buf->ops == &default_pipe_buf_ops;
529 static inline void data_start(const struct iov_iter *i,
530 unsigned int *iter_headp, size_t *offp)
532 unsigned int p_mask = i->pipe->ring_size - 1;
533 unsigned int iter_head = i->head;
534 size_t off = i->iov_offset;
536 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
541 *iter_headp = iter_head;
545 static size_t push_pipe(struct iov_iter *i, size_t size,
546 int *iter_headp, size_t *offp)
548 struct pipe_inode_info *pipe = i->pipe;
549 unsigned int p_tail = pipe->tail;
550 unsigned int p_mask = pipe->ring_size - 1;
551 unsigned int iter_head;
555 if (unlikely(size > i->count))
561 data_start(i, &iter_head, &off);
562 *iter_headp = iter_head;
565 left -= PAGE_SIZE - off;
567 pipe->bufs[iter_head & p_mask].len += size;
570 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
573 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
574 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
575 struct page *page = alloc_page(GFP_USER);
579 buf->ops = &default_pipe_buf_ops;
582 buf->len = min_t(ssize_t, left, PAGE_SIZE);
585 pipe->head = iter_head;
593 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
596 struct pipe_inode_info *pipe = i->pipe;
597 unsigned int p_mask = pipe->ring_size - 1;
604 bytes = n = push_pipe(i, bytes, &i_head, &off);
608 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
609 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
611 i->iov_offset = off + chunk;
621 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
622 __wsum sum, size_t off)
624 __wsum next = csum_partial_copy_nocheck(from, to, len);
625 return csum_block_add(sum, next, off);
628 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
629 struct iov_iter *i, __wsum *sump)
631 struct pipe_inode_info *pipe = i->pipe;
632 unsigned int p_mask = pipe->ring_size - 1;
641 bytes = push_pipe(i, bytes, &i_head, &r);
643 size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
644 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
645 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
648 i->iov_offset = r + chunk;
659 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
661 if (unlikely(iov_iter_is_pipe(i)))
662 return copy_pipe_to_iter(addr, bytes, i);
663 if (iter_is_iovec(i))
665 iterate_and_advance(i, bytes, base, len, off,
666 copyout(base, addr + off, len),
667 memcpy(base, addr + off, len)
672 EXPORT_SYMBOL(_copy_to_iter);
674 #ifdef CONFIG_ARCH_HAS_COPY_MC
675 static int copyout_mc(void __user *to, const void *from, size_t n)
677 if (access_ok(to, n)) {
678 instrument_copy_to_user(to, from, n);
679 n = copy_mc_to_user((__force void *) to, from, n);
684 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
687 struct pipe_inode_info *pipe = i->pipe;
688 unsigned int p_mask = pipe->ring_size - 1;
690 size_t n, off, xfer = 0;
695 n = push_pipe(i, bytes, &i_head, &off);
697 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
698 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
700 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
704 i->iov_offset = off + chunk;
717 * _copy_mc_to_iter - copy to iter with source memory error exception handling
718 * @addr: source kernel address
719 * @bytes: total transfer length
720 * @i: destination iterator
722 * The pmem driver deploys this for the dax operation
723 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
724 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
725 * successfully copied.
727 * The main differences between this and typical _copy_to_iter() are:
729 * * Typical tail/residue handling after a fault retries the copy
730 * byte-by-byte until the fault happens again. Re-triggering machine
731 * checks is potentially fatal so the implementation uses source
732 * alignment and poison alignment assumptions to avoid re-triggering
733 * hardware exceptions.
735 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
736 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
739 * Return: number of bytes copied (may be %0)
741 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
743 if (unlikely(iov_iter_is_pipe(i)))
744 return copy_mc_pipe_to_iter(addr, bytes, i);
745 if (iter_is_iovec(i))
747 __iterate_and_advance(i, bytes, base, len, off,
748 copyout_mc(base, addr + off, len),
749 copy_mc_to_kernel(base, addr + off, len)
754 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
755 #endif /* CONFIG_ARCH_HAS_COPY_MC */
757 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
759 if (unlikely(iov_iter_is_pipe(i))) {
763 if (iter_is_iovec(i))
765 iterate_and_advance(i, bytes, base, len, off,
766 copyin(addr + off, base, len),
767 memcpy(addr + off, base, len)
772 EXPORT_SYMBOL(_copy_from_iter);
774 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
776 if (unlikely(iov_iter_is_pipe(i))) {
780 iterate_and_advance(i, bytes, base, len, off,
781 __copy_from_user_inatomic_nocache(addr + off, base, len),
782 memcpy(addr + off, base, len)
787 EXPORT_SYMBOL(_copy_from_iter_nocache);
789 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
791 * _copy_from_iter_flushcache - write destination through cpu cache
792 * @addr: destination kernel address
793 * @bytes: total transfer length
794 * @i: source iterator
796 * The pmem driver arranges for filesystem-dax to use this facility via
797 * dax_copy_from_iter() for ensuring that writes to persistent memory
798 * are flushed through the CPU cache. It is differentiated from
799 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
800 * all iterator types. The _copy_from_iter_nocache() only attempts to
801 * bypass the cache for the ITER_IOVEC case, and on some archs may use
802 * instructions that strand dirty-data in the cache.
804 * Return: number of bytes copied (may be %0)
806 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
808 if (unlikely(iov_iter_is_pipe(i))) {
812 iterate_and_advance(i, bytes, base, len, off,
813 __copy_from_user_flushcache(addr + off, base, len),
814 memcpy_flushcache(addr + off, base, len)
819 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
822 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
825 size_t v = n + offset;
828 * The general case needs to access the page order in order
829 * to compute the page size.
830 * However, we mostly deal with order-0 pages and thus can
831 * avoid a possible cache line miss for requests that fit all
834 if (n <= v && v <= PAGE_SIZE)
837 head = compound_head(page);
838 v += (page - head) << PAGE_SHIFT;
840 if (likely(n <= v && v <= (page_size(head))))
846 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
849 if (likely(iter_is_iovec(i)))
850 return copy_page_to_iter_iovec(page, offset, bytes, i);
851 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
852 void *kaddr = kmap_local_page(page);
853 size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
857 if (iov_iter_is_pipe(i))
858 return copy_page_to_iter_pipe(page, offset, bytes, i);
859 if (unlikely(iov_iter_is_discard(i))) {
860 if (unlikely(i->count < bytes))
869 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
873 if (unlikely(!page_copy_sane(page, offset, bytes)))
875 page += offset / PAGE_SIZE; // first subpage
878 size_t n = __copy_page_to_iter(page, offset,
879 min(bytes, (size_t)PAGE_SIZE - offset), i);
885 if (offset == PAGE_SIZE) {
892 EXPORT_SYMBOL(copy_page_to_iter);
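/*
 * Illustrative sketch: a read-style path handing part of one pagecache
 * page to a destination iterator and reporting how much actually fit.
 * The example_* name is hypothetical.
 */
static __maybe_unused size_t example_send_partial_page(struct page *page,
							loff_t pos, size_t len,
							struct iov_iter *to)
{
	size_t offset = offset_in_page(pos);
	size_t n = min(len, (size_t)PAGE_SIZE - offset);

	/* may return less than @n on a user fault or a full pipe */
	return copy_page_to_iter(page, offset, n, to);
}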
894 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
897 if (unlikely(!page_copy_sane(page, offset, bytes)))
899 if (likely(iter_is_iovec(i)))
900 return copy_page_from_iter_iovec(page, offset, bytes, i);
901 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
902 void *kaddr = kmap_local_page(page);
903 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
910 EXPORT_SYMBOL(copy_page_from_iter);
912 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
914 struct pipe_inode_info *pipe = i->pipe;
915 unsigned int p_mask = pipe->ring_size - 1;
922 bytes = n = push_pipe(i, bytes, &i_head, &off);
927 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
928 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
929 memset(p + off, 0, chunk);
932 i->iov_offset = off + chunk;
941 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
943 if (unlikely(iov_iter_is_pipe(i)))
944 return pipe_zero(bytes, i);
945 iterate_and_advance(i, bytes, base, len, count,
946 clear_user(base, len),
952 EXPORT_SYMBOL(iov_iter_zero);
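/*
 * Illustrative sketch: filling a hole in a sparse-file read by zeroing
 * the next stretch of the destination iterator instead of copying from a
 * page, never going past what the iterator can still accept. The
 * example_* name is hypothetical.
 */
static __maybe_unused size_t example_zero_hole(struct iov_iter *to, size_t hole)
{
	return iov_iter_zero(min(hole, iov_iter_count(to)), to);
}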
954 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
957 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
958 if (unlikely(!page_copy_sane(page, offset, bytes))) {
959 kunmap_atomic(kaddr);
962 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
963 kunmap_atomic(kaddr);
967 iterate_and_advance(i, bytes, base, len, off,
968 copyin(p + off, base, len),
969 memcpy(p + off, base, len)
971 kunmap_atomic(kaddr);
974 EXPORT_SYMBOL(copy_page_from_iter_atomic);
976 static inline void pipe_truncate(struct iov_iter *i)
978 struct pipe_inode_info *pipe = i->pipe;
979 unsigned int p_tail = pipe->tail;
980 unsigned int p_head = pipe->head;
981 unsigned int p_mask = pipe->ring_size - 1;
983 if (!pipe_empty(p_head, p_tail)) {
984 struct pipe_buffer *buf;
985 unsigned int i_head = i->head;
986 size_t off = i->iov_offset;
989 buf = &pipe->bufs[i_head & p_mask];
990 buf->len = off - buf->offset;
993 while (p_head != i_head) {
995 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1002 static void pipe_advance(struct iov_iter *i, size_t size)
1004 struct pipe_inode_info *pipe = i->pipe;
1006 struct pipe_buffer *buf;
1007 unsigned int p_mask = pipe->ring_size - 1;
1008 unsigned int i_head = i->head;
1009 size_t off = i->iov_offset, left = size;
1011 if (off) /* make it relative to the beginning of buffer */
1012 left += off - pipe->bufs[i_head & p_mask].offset;
1014 buf = &pipe->bufs[i_head & p_mask];
1015 if (left <= buf->len)
1021 i->iov_offset = buf->offset + left;
1024 /* ... and discard everything past that point */
1028 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
1030 struct bvec_iter bi;
1032 bi.bi_size = i->count;
1033 bi.bi_bvec_done = i->iov_offset;
1035 bvec_iter_advance(i->bvec, &bi, size);
1037 i->bvec += bi.bi_idx;
1038 i->nr_segs -= bi.bi_idx;
1039 i->count = bi.bi_size;
1040 i->iov_offset = bi.bi_bvec_done;
1043 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
1045 const struct iovec *iov, *end;
1051 size += i->iov_offset; // from beginning of current segment
1052 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
1053 if (likely(size < iov->iov_len))
1055 size -= iov->iov_len;
1057 i->iov_offset = size;
1058 i->nr_segs -= iov - i->iov;
1062 void iov_iter_advance(struct iov_iter *i, size_t size)
1064 if (unlikely(i->count < size))
1066 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
1067 /* iovec and kvec have identical layouts */
1068 iov_iter_iovec_advance(i, size);
1069 } else if (iov_iter_is_bvec(i)) {
1070 iov_iter_bvec_advance(i, size);
1071 } else if (iov_iter_is_pipe(i)) {
1072 pipe_advance(i, size);
1073 } else if (unlikely(iov_iter_is_xarray(i))) {
1074 i->iov_offset += size;
1076 } else if (iov_iter_is_discard(i)) {
1080 EXPORT_SYMBOL(iov_iter_advance);
1082 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1086 if (WARN_ON(unroll > MAX_RW_COUNT))
1089 if (unlikely(iov_iter_is_pipe(i))) {
1090 struct pipe_inode_info *pipe = i->pipe;
1091 unsigned int p_mask = pipe->ring_size - 1;
1092 unsigned int i_head = i->head;
1093 size_t off = i->iov_offset;
1095 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1096 size_t n = off - b->offset;
1102 if (!unroll && i_head == i->start_head) {
1107 b = &pipe->bufs[i_head & p_mask];
1108 off = b->offset + b->len;
1110 i->iov_offset = off;
1115 if (unlikely(iov_iter_is_discard(i)))
1117 if (unroll <= i->iov_offset) {
1118 i->iov_offset -= unroll;
1121 unroll -= i->iov_offset;
1122 if (iov_iter_is_xarray(i)) {
1123 BUG(); /* We should never go beyond the start of the specified
1124 * range since we might then be straying into pages that
1127 } else if (iov_iter_is_bvec(i)) {
1128 const struct bio_vec *bvec = i->bvec;
1130 size_t n = (--bvec)->bv_len;
1134 i->iov_offset = n - unroll;
1139 } else { /* same logics for iovec and kvec */
1140 const struct iovec *iov = i->iov;
1142 size_t n = (--iov)->iov_len;
1146 i->iov_offset = n - unroll;
1153 EXPORT_SYMBOL(iov_iter_revert);
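/*
 * Illustrative sketch: undoing a partial copy when the operation that
 * consumed it fails, leaving the iterator positioned as if nothing had
 * been taken so the caller can retry. The example_* names, including the
 * consume() callback, are hypothetical.
 */
static __maybe_unused int example_copy_or_revert(void *buf, size_t len,
						 struct iov_iter *from,
						 int (*consume)(void *, size_t))
{
	size_t copied = copy_from_iter(buf, len, from);
	int err = consume(buf, copied);

	if (err)
		iov_iter_revert(from, copied);	/* put the bytes "back" */
	return err;
}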
1156 * Return the count of just the current iov_iter segment.
1158 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1160 if (i->nr_segs > 1) {
1161 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1162 return min(i->count, i->iov->iov_len - i->iov_offset);
1163 if (iov_iter_is_bvec(i))
1164 return min(i->count, i->bvec->bv_len - i->iov_offset);
1168 EXPORT_SYMBOL(iov_iter_single_seg_count);
1170 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1171 const struct kvec *kvec, unsigned long nr_segs,
1174 WARN_ON(direction & ~(READ | WRITE));
1175 *i = (struct iov_iter){
1176 .iter_type = ITER_KVEC,
1177 .data_source = direction,
1184 EXPORT_SYMBOL(iov_iter_kvec);
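/*
 * Illustrative sketch: describing a kernel buffer with an ITER_KVEC so
 * that code written against iov_iter (a ->read_iter() helper, sendmsg
 * path, etc.) can consume it. The example_* name is hypothetical.
 */
static __maybe_unused void example_kernel_source_iter(struct iov_iter *iter,
						      struct kvec *kv,
						      void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	iov_iter_kvec(iter, WRITE, kv, 1, len);	/* the kernel buffer is the source */
}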
1186 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1187 const struct bio_vec *bvec, unsigned long nr_segs,
1190 WARN_ON(direction & ~(READ | WRITE));
1191 *i = (struct iov_iter){
1192 .iter_type = ITER_BVEC,
1193 .data_source = direction,
1200 EXPORT_SYMBOL(iov_iter_bvec);
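/*
 * Illustrative sketch: describing a single page fragment with an
 * ITER_BVEC, the natural choice when the data already lives in pages
 * (block or network I/O paths). The example_* name is hypothetical.
 */
static __maybe_unused void example_page_dest_iter(struct iov_iter *iter,
						  struct bio_vec *bv,
						  struct page *page,
						  unsigned int offset,
						  unsigned int len)
{
	bv->bv_page = page;
	bv->bv_offset = offset;
	bv->bv_len = len;
	iov_iter_bvec(iter, READ, bv, 1, len);	/* the page is the destination */
}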
1202 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1203 struct pipe_inode_info *pipe,
1206 BUG_ON(direction != READ);
1207 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1208 *i = (struct iov_iter){
1209 .iter_type = ITER_PIPE,
1210 .data_source = false,
1213 .start_head = pipe->head,
1218 EXPORT_SYMBOL(iov_iter_pipe);
1221 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1222 * @i: The iterator to initialise.
1223 * @direction: The direction of the transfer.
1224 * @xarray: The xarray to access.
1225 * @start: The start file position.
1226 * @count: The size of the I/O buffer in bytes.
1228 * Set up an I/O iterator to either draw data out of the pages attached to an
1229 * inode or to inject data into those pages. The pages *must* be prevented
1230 * from evaporation, either by taking a ref on them or locking them by the
1233 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1234 struct xarray *xarray, loff_t start, size_t count)
1236 BUG_ON(direction & ~1);
1237 *i = (struct iov_iter) {
1238 .iter_type = ITER_XARRAY,
1239 .data_source = direction,
1241 .xarray_start = start,
1246 EXPORT_SYMBOL(iov_iter_xarray);
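/*
 * Illustrative sketch: letting an iov_iter walk an inode's pagecache
 * directly, the way netfs-style reads hand pagecache pages to a network
 * filesystem. Assumes the caller already holds references on (or has
 * locked) the pages in the range, as the comment above requires. The
 * example_* name is hypothetical.
 */
static __maybe_unused void example_pagecache_iter(struct iov_iter *iter,
						  struct address_space *mapping,
						  loff_t pos, size_t len)
{
	iov_iter_xarray(iter, READ, &mapping->i_pages, pos, len);
}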
1249 * iov_iter_discard - Initialise an I/O iterator that discards data
1250 * @i: The iterator to initialise.
1251 * @direction: The direction of the transfer.
1252 * @count: The size of the I/O buffer in bytes.
1254 * Set up an I/O iterator that just discards everything that's written to it.
1255 * It's only available as a READ iterator.
1257 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1259 BUG_ON(direction != READ);
1260 *i = (struct iov_iter){
1261 .iter_type = ITER_DISCARD,
1262 .data_source = false,
1267 EXPORT_SYMBOL(iov_iter_discard);
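/*
 * Illustrative sketch: consuming bytes from a producer that insists on
 * "copying" them somewhere, by pointing it at a discard iterator which
 * only does the bookkeeping. The example_* name is hypothetical.
 */
static __maybe_unused size_t example_drop_bytes(const void *src, size_t count)
{
	struct iov_iter junk;

	iov_iter_discard(&junk, READ, count);
	return copy_to_iter(src, count, &junk);	/* advances, stores nothing */
}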
1269 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1271 unsigned long res = 0;
1272 size_t size = i->count;
1273 size_t skip = i->iov_offset;
1276 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1277 size_t len = i->iov[k].iov_len - skip;
1279 res |= (unsigned long)i->iov[k].iov_base + skip;
1291 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1294 size_t size = i->count;
1295 unsigned skip = i->iov_offset;
1298 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1299 size_t len = i->bvec[k].bv_len - skip;
1300 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1311 unsigned long iov_iter_alignment(const struct iov_iter *i)
1313 /* iovec and kvec have identical layouts */
1314 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1315 return iov_iter_alignment_iovec(i);
1317 if (iov_iter_is_bvec(i))
1318 return iov_iter_alignment_bvec(i);
1320 if (iov_iter_is_pipe(i)) {
1321 unsigned int p_mask = i->pipe->ring_size - 1;
1322 size_t size = i->count;
1324 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1325 return size | i->iov_offset;
1329 if (iov_iter_is_xarray(i))
1330 return (i->xarray_start + i->iov_offset) | i->count;
1334 EXPORT_SYMBOL(iov_iter_alignment);
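/*
 * Illustrative sketch: the usual direct-I/O style gate, rejecting an
 * iterator whose memory addresses or total length are not aligned to a
 * device's logical block size. The example_* name is hypothetical.
 */
static __maybe_unused int example_check_dio_alignment(const struct iov_iter *iter,
						      unsigned int blksize)
{
	if (iov_iter_alignment(iter) & (blksize - 1))
		return -EINVAL;
	return 0;
}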
1336 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1338 unsigned long res = 0;
1339 unsigned long v = 0;
1340 size_t size = i->count;
1343 if (WARN_ON(!iter_is_iovec(i)))
1346 for (k = 0; k < i->nr_segs; k++) {
1347 if (i->iov[k].iov_len) {
1348 unsigned long base = (unsigned long)i->iov[k].iov_base;
1349 if (v) // if not the first one
1350 res |= base | v; // this start | previous end
1351 v = base + i->iov[k].iov_len;
1352 if (size <= i->iov[k].iov_len)
1354 size -= i->iov[k].iov_len;
1359 EXPORT_SYMBOL(iov_iter_gap_alignment);
1361 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1363 struct page **pages,
1367 struct pipe_inode_info *pipe = i->pipe;
1368 unsigned int p_mask = pipe->ring_size - 1;
1369 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1376 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1384 static ssize_t pipe_get_pages(struct iov_iter *i,
1385 struct page **pages, size_t maxsize, unsigned maxpages,
1388 unsigned int iter_head, npages;
1394 data_start(i, &iter_head, start);
1395 /* Amount of free space: some of this one + all after this one */
1396 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1397 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1399 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1402 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1403 pgoff_t index, unsigned int nr_pages)
1405 XA_STATE(xas, xa, index);
1407 unsigned int ret = 0;
1410 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1411 if (xas_retry(&xas, page))
1414 /* Has the page moved or been split? */
1415 if (unlikely(page != xas_reload(&xas))) {
1420 pages[ret] = find_subpage(page, xas.xa_index);
1421 get_page(pages[ret]);
1422 if (++ret == nr_pages)
1429 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1430 struct page **pages, size_t maxsize,
1431 unsigned maxpages, size_t *_start_offset)
1433 unsigned nr, offset;
1434 pgoff_t index, count;
1435 size_t size = maxsize, actual;
1438 if (!size || !maxpages)
1441 pos = i->xarray_start + i->iov_offset;
1442 index = pos >> PAGE_SHIFT;
1443 offset = pos & ~PAGE_MASK;
1444 *_start_offset = offset;
1447 if (size > PAGE_SIZE - offset) {
1448 size -= PAGE_SIZE - offset;
1449 count += size >> PAGE_SHIFT;
1455 if (count > maxpages)
1458 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1462 actual = PAGE_SIZE * nr;
1464 if (nr == count && size > 0) {
1465 unsigned last_offset = (nr > 1) ? 0 : offset;
1466 actual -= PAGE_SIZE - (last_offset + size);
1471 /* must be done on a non-empty ITER_IOVEC */
1472 static unsigned long first_iovec_segment(const struct iov_iter *i,
1473 size_t *size, size_t *start,
1474 size_t maxsize, unsigned maxpages)
1479 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1480 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1481 size_t len = i->iov[k].iov_len - skip;
1487 len += (*start = addr % PAGE_SIZE);
1488 if (len > maxpages * PAGE_SIZE)
1489 len = maxpages * PAGE_SIZE;
1491 return addr & PAGE_MASK;
1493 BUG(); // if it had been empty, we wouldn't get called
1496 /* must be done on a non-empty ITER_BVEC */
1497 static struct page *first_bvec_segment(const struct iov_iter *i,
1498 size_t *size, size_t *start,
1499 size_t maxsize, unsigned maxpages)
1502 size_t skip = i->iov_offset, len;
1504 len = i->bvec->bv_len - skip;
1507 skip += i->bvec->bv_offset;
1508 page = i->bvec->bv_page + skip / PAGE_SIZE;
1509 len += (*start = skip % PAGE_SIZE);
1510 if (len > maxpages * PAGE_SIZE)
1511 len = maxpages * PAGE_SIZE;
1516 ssize_t iov_iter_get_pages(struct iov_iter *i,
1517 struct page **pages, size_t maxsize, unsigned maxpages,
1523 if (maxsize > i->count)
1528 if (likely(iter_is_iovec(i))) {
1529 unsigned int gup_flags = 0;
1532 if (iov_iter_rw(i) != WRITE)
1533 gup_flags |= FOLL_WRITE;
1535 gup_flags |= FOLL_NOFAULT;
1537 addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
1538 n = DIV_ROUND_UP(len, PAGE_SIZE);
1539 res = get_user_pages_fast(addr, n, gup_flags, pages);
1540 if (unlikely(res <= 0))
1542 return (res == n ? len : res * PAGE_SIZE) - *start;
1544 if (iov_iter_is_bvec(i)) {
1547 page = first_bvec_segment(i, &len, start, maxsize, maxpages);
1548 n = DIV_ROUND_UP(len, PAGE_SIZE);
1550 get_page(*pages++ = page++);
1551 return len - *start;
1553 if (iov_iter_is_pipe(i))
1554 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1555 if (iov_iter_is_xarray(i))
1556 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1559 EXPORT_SYMBOL(iov_iter_get_pages);
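/*
 * Illustrative sketch: taking references on the pages backing the next
 * chunk of a user-backed iterator (for zero-copy I/O) and dropping them
 * again afterwards. iov_iter_get_pages() does not advance the iterator;
 * the caller does that once the transfer is set up. The example_* name
 * is hypothetical.
 */
static __maybe_unused ssize_t example_pin_next_chunk(struct iov_iter *iter,
						     struct page **pages,
						     unsigned int maxpages)
{
	size_t offset;
	ssize_t bytes;
	int npages, n;

	bytes = iov_iter_get_pages(iter, pages, maxpages * PAGE_SIZE,
				   maxpages, &offset);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);

	/* ... hand (pages, offset, bytes) to the actual transfer here ... */

	for (n = 0; n < npages; n++)
		put_page(pages[n]);
	iov_iter_advance(iter, bytes);
	return bytes;
}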
1561 static struct page **get_pages_array(size_t n)
1563 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1566 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1567 struct page ***pages, size_t maxsize,
1571 unsigned int iter_head, npages;
1577 data_start(i, &iter_head, start);
1578 /* Amount of free space: some of this one + all after this one */
1579 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1580 n = npages * PAGE_SIZE - *start;
1584 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1585 p = get_pages_array(npages);
1588 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1596 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1597 struct page ***pages, size_t maxsize,
1598 size_t *_start_offset)
1601 unsigned nr, offset;
1602 pgoff_t index, count;
1603 size_t size = maxsize, actual;
1609 pos = i->xarray_start + i->iov_offset;
1610 index = pos >> PAGE_SHIFT;
1611 offset = pos & ~PAGE_MASK;
1612 *_start_offset = offset;
1615 if (size > PAGE_SIZE - offset) {
1616 size -= PAGE_SIZE - offset;
1617 count += size >> PAGE_SHIFT;
1623 p = get_pages_array(count);
1628 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1632 actual = PAGE_SIZE * nr;
1634 if (nr == count && size > 0) {
1635 unsigned last_offset = (nr > 1) ? 0 : offset;
1636 actual -= PAGE_SIZE - (last_offset + size);
1641 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1642 struct page ***pages, size_t maxsize,
1649 if (maxsize > i->count)
1654 if (likely(iter_is_iovec(i))) {
1655 unsigned int gup_flags = 0;
1658 if (iov_iter_rw(i) != WRITE)
1659 gup_flags |= FOLL_WRITE;
1661 gup_flags |= FOLL_NOFAULT;
1663 addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
1664 n = DIV_ROUND_UP(len, PAGE_SIZE);
1665 p = get_pages_array(n);
1668 res = get_user_pages_fast(addr, n, gup_flags, p);
1669 if (unlikely(res <= 0)) {
1675 return (res == n ? len : res * PAGE_SIZE) - *start;
1677 if (iov_iter_is_bvec(i)) {
1680 page = first_bvec_segment(i, &len, start, maxsize, ~0U);
1681 n = DIV_ROUND_UP(len, PAGE_SIZE);
1682 *pages = p = get_pages_array(n);
1686 get_page(*p++ = page++);
1687 return len - *start;
1689 if (iov_iter_is_pipe(i))
1690 return pipe_get_pages_alloc(i, pages, maxsize, start);
1691 if (iov_iter_is_xarray(i))
1692 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1695 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1697 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1702 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1706 iterate_and_advance(i, bytes, base, len, off, ({
1707 next = csum_and_copy_from_user(base, addr + off, len);
1708 sum = csum_block_add(sum, next, off);
1711 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1717 EXPORT_SYMBOL(csum_and_copy_from_iter);
1719 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1722 struct csum_state *csstate = _csstate;
1725 if (unlikely(iov_iter_is_discard(i))) {
1726 WARN_ON(1); /* for now */
1730 sum = csum_shift(csstate->csum, csstate->off);
1731 if (unlikely(iov_iter_is_pipe(i)))
1732 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1733 else iterate_and_advance(i, bytes, base, len, off, ({
1734 next = csum_and_copy_to_user(addr + off, base, len);
1735 sum = csum_block_add(sum, next, off);
1738 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1741 csstate->csum = csum_shift(sum, csstate->off);
1742 csstate->off += bytes;
1745 EXPORT_SYMBOL(csum_and_copy_to_iter);
1747 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1750 #ifdef CONFIG_CRYPTO_HASH
1751 struct ahash_request *hash = hashp;
1752 struct scatterlist sg;
1755 copied = copy_to_iter(addr, bytes, i);
1756 sg_init_one(&sg, addr, copied);
1757 ahash_request_set_crypt(hash, &sg, NULL, copied);
1758 crypto_ahash_update(hash);
1764 EXPORT_SYMBOL(hash_and_copy_to_iter);
1766 static int iov_npages(const struct iov_iter *i, int maxpages)
1768 size_t skip = i->iov_offset, size = i->count;
1769 const struct iovec *p;
1772 for (p = i->iov; size; skip = 0, p++) {
1773 unsigned offs = offset_in_page(p->iov_base + skip);
1774 size_t len = min(p->iov_len - skip, size);
1778 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1779 if (unlikely(npages > maxpages))
1786 static int bvec_npages(const struct iov_iter *i, int maxpages)
1788 size_t skip = i->iov_offset, size = i->count;
1789 const struct bio_vec *p;
1792 for (p = i->bvec; size; skip = 0, p++) {
1793 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1794 size_t len = min(p->bv_len - skip, size);
1797 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1798 if (unlikely(npages > maxpages))
1804 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1806 if (unlikely(!i->count))
1808 /* iovec and kvec have identical layouts */
1809 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1810 return iov_npages(i, maxpages);
1811 if (iov_iter_is_bvec(i))
1812 return bvec_npages(i, maxpages);
1813 if (iov_iter_is_pipe(i)) {
1814 unsigned int iter_head;
1821 data_start(i, &iter_head, &off);
1822 /* some of this one + all after this one */
1823 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1824 return min(npages, maxpages);
1826 if (iov_iter_is_xarray(i)) {
1827 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1828 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1829 return min(npages, maxpages);
1833 EXPORT_SYMBOL(iov_iter_npages);
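/*
 * Illustrative sketch: sizing a temporary page array before mapping an
 * iterator, with a caller-supplied budget so a huge request cannot drive
 * an unbounded allocation. The example_* name is hypothetical.
 */
static __maybe_unused struct page **example_alloc_page_array(const struct iov_iter *iter,
							     int budget, int *npages)
{
	*npages = iov_iter_npages(iter, budget);
	return kcalloc(*npages, sizeof(struct page *), GFP_KERNEL);
}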
1835 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1838 if (unlikely(iov_iter_is_pipe(new))) {
1842 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1844 if (iov_iter_is_bvec(new))
1845 return new->bvec = kmemdup(new->bvec,
1846 new->nr_segs * sizeof(struct bio_vec),
1849 /* iovec and kvec have identical layout */
1850 return new->iov = kmemdup(new->iov,
1851 new->nr_segs * sizeof(struct iovec),
1854 EXPORT_SYMBOL(dup_iter);
1856 static int copy_compat_iovec_from_user(struct iovec *iov,
1857 const struct iovec __user *uvec, unsigned long nr_segs)
1859 const struct compat_iovec __user *uiov =
1860 (const struct compat_iovec __user *)uvec;
1861 int ret = -EFAULT, i;
1863 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1866 for (i = 0; i < nr_segs; i++) {
1870 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1871 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1873 /* check for compat_size_t not fitting in compat_ssize_t .. */
1878 iov[i].iov_base = compat_ptr(buf);
1879 iov[i].iov_len = len;
1888 static int copy_iovec_from_user(struct iovec *iov,
1889 const struct iovec __user *uvec, unsigned long nr_segs)
1893 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1895 for (seg = 0; seg < nr_segs; seg++) {
1896 if ((ssize_t)iov[seg].iov_len < 0)
1903 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1904 unsigned long nr_segs, unsigned long fast_segs,
1905 struct iovec *fast_iov, bool compat)
1907 struct iovec *iov = fast_iov;
1911 * SuS says "The readv() function *may* fail if the iovcnt argument was
1912 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1913 * traditionally returned zero for zero segments, so...
1917 if (nr_segs > UIO_MAXIOV)
1918 return ERR_PTR(-EINVAL);
1919 if (nr_segs > fast_segs) {
1920 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1922 return ERR_PTR(-ENOMEM);
1926 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1928 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1930 if (iov != fast_iov)
1932 return ERR_PTR(ret);
1938 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1939 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1940 struct iov_iter *i, bool compat)
1942 ssize_t total_len = 0;
1946 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1949 return PTR_ERR(iov);
1953 * According to the Single Unix Specification we should return EINVAL if
1954 * an element length is < 0 when cast to ssize_t or if the total length
1955 * would overflow the ssize_t return value of the system call.
1957 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1960 for (seg = 0; seg < nr_segs; seg++) {
1961 ssize_t len = (ssize_t)iov[seg].iov_len;
1963 if (!access_ok(iov[seg].iov_base, len)) {
1970 if (len > MAX_RW_COUNT - total_len) {
1971 len = MAX_RW_COUNT - total_len;
1972 iov[seg].iov_len = len;
1977 iov_iter_init(i, type, iov, nr_segs, total_len);
1986 * import_iovec() - Copy an array of &struct iovec from userspace
1987 * into the kernel, check that it is valid, and initialize a new
1988 * &struct iov_iter iterator to access it.
1990 * @type: One of %READ or %WRITE.
1991 * @uvec: Pointer to the userspace array.
1992 * @nr_segs: Number of elements in userspace array.
1993 * @fast_segs: Number of elements in *@iovp.
1994 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1995 * on-stack) kernel array.
1996 * @i: Pointer to iterator that will be initialized on success.
1998 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1999 * then this function places %NULL in *@iovp on return. Otherwise, a new
2000 * array will be allocated and the result placed in *@iovp. This means that
2001 * the caller may call kfree() on *@iovp regardless of whether the small
2002 * on-stack array was used or not (and regardless of whether this function
2003 * returns an error or not).
2005 * Return: Negative error code on error, bytes imported on success
2007 ssize_t import_iovec(int type, const struct iovec __user *uvec,
2008 unsigned nr_segs, unsigned fast_segs,
2009 struct iovec **iovp, struct iov_iter *i)
2011 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2012 in_compat_syscall());
2014 EXPORT_SYMBOL(import_iovec);
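/*
 * Illustrative sketch: the canonical readv(2)-style caller of
 * import_iovec(). Whatever happens, the returned iov pointer is safe to
 * pass to kfree(); it is NULL when the on-stack array was big enough.
 * The example_* name is hypothetical.
 */
static __maybe_unused ssize_t example_readv_path(const struct iovec __user *uvec,
						 unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... feed &iter to copy_to_iter() or a ->read_iter() here ... */

	kfree(iov);
	return ret;
}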
2016 int import_single_range(int rw, void __user *buf, size_t len,
2017 struct iovec *iov, struct iov_iter *i)
2019 if (len > MAX_RW_COUNT)
2021 if (unlikely(!access_ok(buf, len)))
2024 iov->iov_base = buf;
2026 iov_iter_init(i, rw, iov, 1, len);
2029 EXPORT_SYMBOL(import_single_range);
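/*
 * Illustrative sketch: wrapping one user buffer for a read and saving
 * the iterator state so the request can be replayed if the lower layer
 * asks for a retry (the pattern used around -EAGAIN resubmission). The
 * example_* names, including the issue() callback, are hypothetical.
 */
static __maybe_unused ssize_t example_single_range_retry(void __user *ubuf, size_t len,
							  ssize_t (*issue)(struct iov_iter *))
{
	struct iovec iov;
	struct iov_iter iter;
	struct iov_iter_state state;
	ssize_t ret;

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (ret)
		return ret;

	iov_iter_save_state(&iter, &state);
	ret = issue(&iter);
	if (ret == -EAGAIN) {
		/* the failed attempt may have advanced the iterator; rewind */
		iov_iter_restore(&iter, &state);
		ret = issue(&iter);
	}
	return ret;
}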
2032 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
2033 * iov_iter_save_state() was called.
2035 * @i: &struct iov_iter to restore
2036 * @state: state to restore from
2038 * Used after iov_iter_save_state() to restore @i, if operations may have advanced it.
2041 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
2043 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
2045 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
2046 !iov_iter_is_kvec(i)))
2048 i->iov_offset = state->iov_offset;
2049 i->count = state->count;
2051 * For the *vec iters, nr_segs + iov is constant - if we increment
2052 * the vec, then we also decrement the nr_segs count. Hence we don't
2053 * need to track both of these, just one is enough and we can derive
2054 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
2055 * size, so we can just increment the iov pointer as they are unionized.
2056 * ITER_BVEC _may_ be the same size on some archs, but on others it is
2057 * not. Be safe and handle it separately.
2059 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
2060 if (iov_iter_is_bvec(i))
2061 i->bvec -= state->nr_segs - i->nr_segs;
2063 i->iov -= state->nr_segs - i->nr_segs;
2064 i->nr_segs = state->nr_segs;