1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/uio.h>
6 #include <linux/pagemap.h>
7 #include <linux/slab.h>
8 #include <linux/vmalloc.h>
9 #include <linux/splice.h>
10 #include <linux/compat.h>
11 #include <net/checksum.h>
12 #include <linux/scatterlist.h>
13 #include <linux/instrumented.h>
15 #define PIPE_PARANOIA /* for now */
17 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
21 __v.iov_len = min(n, __p->iov_len - skip); \
22 if (likely(__v.iov_len)) { \
23 __v.iov_base = __p->iov_base + skip; \
25 __v.iov_len -= left; \
26 skip += __v.iov_len; \
31 while (unlikely(!left && n)) { \
33 __v.iov_len = min(n, __p->iov_len); \
34 if (unlikely(!__v.iov_len)) \
36 __v.iov_base = __p->iov_base; \
38 __v.iov_len -= left; \
45 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
48 __v.iov_len = min(n, __p->iov_len - skip); \
49 if (likely(__v.iov_len)) { \
50 __v.iov_base = __p->iov_base + skip; \
52 skip += __v.iov_len; \
55 while (unlikely(n)) { \
57 __v.iov_len = min(n, __p->iov_len); \
58 if (unlikely(!__v.iov_len)) \
60 __v.iov_base = __p->iov_base; \
68 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
69 struct bvec_iter __start; \
70 __start.bi_size = n; \
71 __start.bi_bvec_done = skip; \
73 for_each_bvec(__v, i->bvec, __bi, __start) { \
80 #define iterate_all_kinds(i, n, v, I, B, K) { \
82 size_t skip = i->iov_offset; \
83 if (unlikely(i->type & ITER_BVEC)) { \
85 struct bvec_iter __bi; \
86 iterate_bvec(i, n, v, __bi, skip, (B)) \
87 } else if (unlikely(i->type & ITER_KVEC)) { \
88 const struct kvec *kvec; \
90 iterate_kvec(i, n, v, kvec, skip, (K)) \
91 } else if (unlikely(i->type & ITER_DISCARD)) { \
93 const struct iovec *iov; \
95 iterate_iovec(i, n, v, iov, skip, (I)) \
100 #define iterate_and_advance(i, n, v, I, B, K) { \
101 if (unlikely(i->count < n)) \
104 size_t skip = i->iov_offset; \
105 if (unlikely(i->type & ITER_BVEC)) { \
106 const struct bio_vec *bvec = i->bvec; \
108 struct bvec_iter __bi; \
109 iterate_bvec(i, n, v, __bi, skip, (B)) \
110 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
111 i->nr_segs -= i->bvec - bvec; \
112 skip = __bi.bi_bvec_done; \
113 } else if (unlikely(i->type & ITER_KVEC)) { \
114 const struct kvec *kvec; \
116 iterate_kvec(i, n, v, kvec, skip, (K)) \
117 if (skip == kvec->iov_len) { \
121 i->nr_segs -= kvec - i->kvec; \
123 } else if (unlikely(i->type & ITER_DISCARD)) { \
126 const struct iovec *iov; \
128 iterate_iovec(i, n, v, iov, skip, (I)) \
129 if (skip == iov->iov_len) { \
133 i->nr_segs -= iov - i->iov; \
137 i->iov_offset = skip; \
141 static int copyout(void __user *to, const void *from, size_t n)
143 if (access_ok(to, n)) {
144 instrument_copy_to_user(to, from, n);
145 n = raw_copy_to_user(to, from, n);
150 static int copyin(void *to, const void __user *from, size_t n)
152 if (access_ok(from, n)) {
153 instrument_copy_from_user(to, from, n);
154 n = raw_copy_from_user(to, from, n);
159 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
162 size_t skip, copy, left, wanted;
163 const struct iovec *iov;
167 if (unlikely(bytes > i->count))
170 if (unlikely(!bytes))
176 skip = i->iov_offset;
177 buf = iov->iov_base + skip;
178 copy = min(bytes, iov->iov_len - skip);
180 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
181 kaddr = kmap_atomic(page);
182 from = kaddr + offset;
184 /* first chunk, usually the only one */
185 left = copyout(buf, from, copy);
191 while (unlikely(!left && bytes)) {
194 copy = min(bytes, iov->iov_len);
195 left = copyout(buf, from, copy);
201 if (likely(!bytes)) {
202 kunmap_atomic(kaddr);
205 offset = from - kaddr;
207 kunmap_atomic(kaddr);
208 copy = min(bytes, iov->iov_len - skip);
210 /* Too bad - revert to non-atomic kmap */
213 from = kaddr + offset;
214 left = copyout(buf, from, copy);
219 while (unlikely(!left && bytes)) {
222 copy = min(bytes, iov->iov_len);
223 left = copyout(buf, from, copy);
232 if (skip == iov->iov_len) {
236 i->count -= wanted - bytes;
237 i->nr_segs -= iov - i->iov;
239 i->iov_offset = skip;
240 return wanted - bytes;
243 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
246 size_t skip, copy, left, wanted;
247 const struct iovec *iov;
251 if (unlikely(bytes > i->count))
254 if (unlikely(!bytes))
260 skip = i->iov_offset;
261 buf = iov->iov_base + skip;
262 copy = min(bytes, iov->iov_len - skip);
264 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
265 kaddr = kmap_atomic(page);
268 /* first chunk, usually the only one */
269 left = copyin(to, buf, copy);
275 while (unlikely(!left && bytes)) {
278 copy = min(bytes, iov->iov_len);
279 left = copyin(to, buf, copy);
285 if (likely(!bytes)) {
286 kunmap_atomic(kaddr);
291 kunmap_atomic(kaddr);
292 copy = min(bytes, iov->iov_len - skip);
294 /* Too bad - revert to non-atomic kmap */
298 left = copyin(to, buf, copy);
303 while (unlikely(!left && bytes)) {
306 copy = min(bytes, iov->iov_len);
307 left = copyin(to, buf, copy);
316 if (skip == iov->iov_len) {
320 i->count -= wanted - bytes;
321 i->nr_segs -= iov - i->iov;
323 i->iov_offset = skip;
324 return wanted - bytes;
328 static bool sanity(const struct iov_iter *i)
330 struct pipe_inode_info *pipe = i->pipe;
331 unsigned int p_head = pipe->head;
332 unsigned int p_tail = pipe->tail;
333 unsigned int p_mask = pipe->ring_size - 1;
334 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
335 unsigned int i_head = i->head;
339 struct pipe_buffer *p;
340 if (unlikely(p_occupancy == 0))
341 goto Bad; // pipe must be non-empty
342 if (unlikely(i_head != p_head - 1))
343 goto Bad; // must be at the last buffer...
345 p = &pipe->bufs[i_head & p_mask];
346 if (unlikely(p->offset + p->len != i->iov_offset))
347 goto Bad; // ... at the end of segment
349 if (i_head != p_head)
350 goto Bad; // must be right after the last buffer
354 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
355 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
356 p_head, p_tail, pipe->ring_size);
357 for (idx = 0; idx < pipe->ring_size; idx++)
358 printk(KERN_ERR "[%p %p %d %d]\n",
360 pipe->bufs[idx].page,
361 pipe->bufs[idx].offset,
362 pipe->bufs[idx].len);
367 #define sanity(i) true
370 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
373 struct pipe_inode_info *pipe = i->pipe;
374 struct pipe_buffer *buf;
375 unsigned int p_tail = pipe->tail;
376 unsigned int p_mask = pipe->ring_size - 1;
377 unsigned int i_head = i->head;
380 if (unlikely(bytes > i->count))
383 if (unlikely(!bytes))
390 buf = &pipe->bufs[i_head & p_mask];
392 if (offset == off && buf->page == page) {
393 /* merge with the last one */
395 i->iov_offset += bytes;
399 buf = &pipe->bufs[i_head & p_mask];
401 if (pipe_full(i_head, p_tail, pipe->max_usage))
404 buf->ops = &page_cache_pipe_buf_ops;
407 buf->offset = offset;
410 pipe->head = i_head + 1;
411 i->iov_offset = offset + bytes;
419 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
420 * bytes. For each iovec, fault in each page that constitutes the iovec.
422 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
423 * because it is an invalid address).
425 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
427 size_t skip = i->iov_offset;
428 const struct iovec *iov;
432 if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
433 iterate_iovec(i, bytes, v, iov, skip, ({
434 err = fault_in_pages_readable(v.iov_base, v.iov_len);
441 EXPORT_SYMBOL(iov_iter_fault_in_readable);
443 void iov_iter_init(struct iov_iter *i, unsigned int direction,
444 const struct iovec *iov, unsigned long nr_segs,
447 WARN_ON(direction & ~(READ | WRITE));
448 direction &= READ | WRITE;
450 /* It will get better. Eventually... */
451 if (uaccess_kernel()) {
452 i->type = ITER_KVEC | direction;
453 i->kvec = (struct kvec *)iov;
455 i->type = ITER_IOVEC | direction;
458 i->nr_segs = nr_segs;
462 EXPORT_SYMBOL(iov_iter_init);
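
/*
 * Illustrative sketch, not part of the original file: wrapping a single
 * user buffer in an ITER_IOVEC iterator for a read-style operation.  The
 * helper name is hypothetical; purely kernel buffers would go through
 * iov_iter_kvec() or iov_iter_bvec() instead.
 */
static void example_iter_from_user_buf(struct iov_iter *iter, struct iovec *iov,
				       void __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* READ: the iterator is the destination of a kernel-to-user copy */
	iov_iter_init(iter, READ, iov, 1, len);
}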
464 static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
466 char *from = kmap_atomic(page);
467 memcpy(to, from + offset, len);
471 static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
473 char *to = kmap_atomic(page);
474 memcpy(to + offset, from, len);
478 static void memzero_page(struct page *page, size_t offset, size_t len)
480 char *addr = kmap_atomic(page);
481 memset(addr + offset, 0, len);
485 static inline bool allocated(struct pipe_buffer *buf)
487 return buf->ops == &default_pipe_buf_ops;
490 static inline void data_start(const struct iov_iter *i,
491 unsigned int *iter_headp, size_t *offp)
493 unsigned int p_mask = i->pipe->ring_size - 1;
494 unsigned int iter_head = i->head;
495 size_t off = i->iov_offset;
497 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
502 *iter_headp = iter_head;
506 static size_t push_pipe(struct iov_iter *i, size_t size,
507 int *iter_headp, size_t *offp)
509 struct pipe_inode_info *pipe = i->pipe;
510 unsigned int p_tail = pipe->tail;
511 unsigned int p_mask = pipe->ring_size - 1;
512 unsigned int iter_head;
516 if (unlikely(size > i->count))
522 data_start(i, &iter_head, &off);
523 *iter_headp = iter_head;
526 left -= PAGE_SIZE - off;
528 pipe->bufs[iter_head & p_mask].len += size;
531 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
534 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
535 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
536 struct page *page = alloc_page(GFP_USER);
540 buf->ops = &default_pipe_buf_ops;
543 buf->len = min_t(ssize_t, left, PAGE_SIZE);
546 pipe->head = iter_head;
554 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
557 struct pipe_inode_info *pipe = i->pipe;
558 unsigned int p_mask = pipe->ring_size - 1;
565 bytes = n = push_pipe(i, bytes, &i_head, &off);
569 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
570 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
572 i->iov_offset = off + chunk;
582 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
583 __wsum sum, size_t off)
585 __wsum next = csum_partial_copy_nocheck(from, to, len);
586 return csum_block_add(sum, next, off);
589 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
590 __wsum *csum, struct iov_iter *i)
592 struct pipe_inode_info *pipe = i->pipe;
593 unsigned int p_mask = pipe->ring_size - 1;
602 bytes = n = push_pipe(i, bytes, &i_head, &r);
606 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
607 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
608 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
611 i->iov_offset = r + chunk;
623 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
625 const char *from = addr;
626 if (unlikely(iov_iter_is_pipe(i)))
627 return copy_pipe_to_iter(addr, bytes, i);
628 if (iter_is_iovec(i))
630 iterate_and_advance(i, bytes, v,
631 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
632 memcpy_to_page(v.bv_page, v.bv_offset,
633 (from += v.bv_len) - v.bv_len, v.bv_len),
634 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
639 EXPORT_SYMBOL(_copy_to_iter);
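
/*
 * Illustrative sketch: copy a kernel buffer out to whatever the iterator
 * describes (user iovecs, kvecs, bvec pages or a pipe).  The helper name
 * and the convention of treating a short copy as -EFAULT are assumptions
 * of this example, not part of the original file.
 */
static ssize_t example_emit_kbuf(const void *kbuf, size_t len, struct iov_iter *to)
{
	size_t copied = copy_to_iter(kbuf, len, to);

	return copied == len ? copied : -EFAULT;
}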
641 #ifdef CONFIG_ARCH_HAS_COPY_MC
642 static int copyout_mc(void __user *to, const void *from, size_t n)
644 if (access_ok(to, n)) {
645 instrument_copy_to_user(to, from, n);
646 n = copy_mc_to_user((__force void *) to, from, n);
651 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
652 const char *from, size_t len)
657 to = kmap_atomic(page);
658 ret = copy_mc_to_kernel(to + offset, from, len);
664 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
667 struct pipe_inode_info *pipe = i->pipe;
668 unsigned int p_mask = pipe->ring_size - 1;
670 size_t n, off, xfer = 0;
675 bytes = n = push_pipe(i, bytes, &i_head, &off);
679 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
682 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
685 i->iov_offset = off + chunk - rem;
699 * _copy_mc_to_iter - copy to iter with source memory error exception handling
700 * @addr: source kernel address
701 * @bytes: total transfer length
702 * @i: destination iterator
704 * The pmem driver deploys this for the dax operation
705 * (dax_copy_to_iter()) for dax reads, which bypass the page cache and
706 * the block layer. On a machine check (#MC), read(2) aborts and returns
707 * either EIO or the number of bytes successfully copied.
709 * The main differences between this and a typical _copy_to_iter() are:
711 * * Typical tail/residue handling after a fault retries the copy
712 * byte-by-byte until the fault happens again. Re-triggering machine
713 * checks is potentially fatal so the implementation uses source
714 * alignment and poison alignment assumptions to avoid re-triggering
715 * hardware exceptions.
717 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
718 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
719 *   a short copy.
721 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
723 const char *from = addr;
724 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
726 if (unlikely(iov_iter_is_pipe(i)))
727 return copy_mc_pipe_to_iter(addr, bytes, i);
728 if (iter_is_iovec(i))
730 iterate_and_advance(i, bytes, v,
731 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
734 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
735 (from += v.bv_len) - v.bv_len, v.bv_len);
737 curr_addr = (unsigned long) from;
738 bytes = curr_addr - s_addr - rem;
743 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
744 - v.iov_len, v.iov_len);
746 curr_addr = (unsigned long) from;
747 bytes = curr_addr - s_addr - rem;
755 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
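
/*
 * Illustrative sketch of the dax read pattern described above: copy from a
 * direct-mapped (and possibly poisoned) pmem address into the iterator and
 * report however many bytes survived.  'dax_kaddr' and the helper name are
 * hypothetical.
 */
static size_t example_dax_read(void *dax_kaddr, size_t len, struct iov_iter *to)
{
	/* a short return means a machine check was hit part way through */
	return _copy_mc_to_iter(dax_kaddr, len, to);
}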
756 #endif /* CONFIG_ARCH_HAS_COPY_MC */
758 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
761 if (unlikely(iov_iter_is_pipe(i))) {
765 if (iter_is_iovec(i))
767 iterate_and_advance(i, bytes, v,
768 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
769 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
770 v.bv_offset, v.bv_len),
771 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
776 EXPORT_SYMBOL(_copy_from_iter);
778 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
781 if (unlikely(iov_iter_is_pipe(i))) {
785 if (unlikely(i->count < bytes))
788 if (iter_is_iovec(i))
790 iterate_all_kinds(i, bytes, v, ({
791 if (copyin((to += v.iov_len) - v.iov_len,
792 v.iov_base, v.iov_len))
795 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
796 v.bv_offset, v.bv_len),
797 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
800 iov_iter_advance(i, bytes);
803 EXPORT_SYMBOL(_copy_from_iter_full);
805 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
808 if (unlikely(iov_iter_is_pipe(i))) {
812 iterate_and_advance(i, bytes, v,
813 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
814 v.iov_base, v.iov_len),
815 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
816 v.bv_offset, v.bv_len),
817 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
822 EXPORT_SYMBOL(_copy_from_iter_nocache);
824 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
826 * _copy_from_iter_flushcache - write destination through cpu cache
827 * @addr: destination kernel address
828 * @bytes: total transfer length
829 * @i: source iterator
831 * The pmem driver arranges for filesystem-dax to use this facility via
832 * dax_copy_from_iter() for ensuring that writes to persistent memory
833 * are flushed through the CPU cache. It is differentiated from
834 * _copy_from_iter_nocache() in that it guarantees all data is flushed
835 * for all iterator types. _copy_from_iter_nocache() only attempts to
836 * bypass the cache for the ITER_IOVEC case, and on some archs may use
837 * instructions that strand dirty data in the cache.
839 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
842 if (unlikely(iov_iter_is_pipe(i))) {
846 iterate_and_advance(i, bytes, v,
847 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
848 v.iov_base, v.iov_len),
849 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
850 v.bv_offset, v.bv_len),
851 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
857 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
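
/*
 * Illustrative sketch of the dax write pattern described above (only
 * meaningful under CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE): pull data from the
 * iterator into persistent memory using stores that do not leave dirty
 * lines in the CPU cache.  'dax_kaddr' and the helper name are hypothetical.
 */
static size_t example_dax_write(void *dax_kaddr, size_t len, struct iov_iter *from)
{
	return _copy_from_iter_flushcache(dax_kaddr, len, from);
}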
860 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
863 if (unlikely(iov_iter_is_pipe(i))) {
867 if (unlikely(i->count < bytes))
869 iterate_all_kinds(i, bytes, v, ({
870 if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
871 v.iov_base, v.iov_len))
874 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
875 v.bv_offset, v.bv_len),
876 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
879 iov_iter_advance(i, bytes);
882 EXPORT_SYMBOL(_copy_from_iter_full_nocache);
884 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
887 size_t v = n + offset;
890 * The general case needs to access the page order in order
891 * to compute the page size.
892 * However, we mostly deal with order-0 pages and thus can
893 * avoid a possible cache line miss for requests that fit all
896 if (n <= v && v <= PAGE_SIZE)
899 head = compound_head(page);
900 v += (page - head) << PAGE_SHIFT;
902 if (likely(n <= v && v <= (page_size(head))))
908 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
911 if (unlikely(!page_copy_sane(page, offset, bytes)))
913 if (i->type & (ITER_BVEC|ITER_KVEC)) {
914 void *kaddr = kmap_atomic(page);
915 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
916 kunmap_atomic(kaddr);
918 } else if (unlikely(iov_iter_is_discard(i)))
920 else if (likely(!iov_iter_is_pipe(i)))
921 return copy_page_to_iter_iovec(page, offset, bytes, i);
923 return copy_page_to_iter_pipe(page, offset, bytes, i);
925 EXPORT_SYMBOL(copy_page_to_iter);
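
/*
 * Illustrative sketch: hand one up-to-date page-cache page to the iterator,
 * as a ->read_iter() implementation would.  The helper name and the
 * short-copy error convention are assumptions of this example.
 */
static ssize_t example_send_page(struct page *page, size_t offset, size_t len,
				 struct iov_iter *to)
{
	size_t copied = copy_page_to_iter(page, offset, len, to);

	return copied ? copied : -EFAULT;
}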
927 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
930 if (unlikely(!page_copy_sane(page, offset, bytes)))
932 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
936 if (i->type & (ITER_BVEC|ITER_KVEC)) {
937 void *kaddr = kmap_atomic(page);
938 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
939 kunmap_atomic(kaddr);
942 return copy_page_from_iter_iovec(page, offset, bytes, i);
944 EXPORT_SYMBOL(copy_page_from_iter);
946 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
948 struct pipe_inode_info *pipe = i->pipe;
949 unsigned int p_mask = pipe->ring_size - 1;
956 bytes = n = push_pipe(i, bytes, &i_head, &off);
961 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
962 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
964 i->iov_offset = off + chunk;
973 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
975 if (unlikely(iov_iter_is_pipe(i)))
976 return pipe_zero(bytes, i);
977 iterate_and_advance(i, bytes, v,
978 clear_user(v.iov_base, v.iov_len),
979 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
980 memset(v.iov_base, 0, v.iov_len)
985 EXPORT_SYMBOL(iov_iter_zero);
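
/*
 * Illustrative sketch: a read over a hole simply zero-fills the destination.
 * The helper name is hypothetical; iov_iter_zero() itself clamps the length
 * to whatever is left in the iterator.
 */
static size_t example_read_hole(size_t len, struct iov_iter *to)
{
	return iov_iter_zero(len, to);
}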
987 size_t iov_iter_copy_from_user_atomic(struct page *page,
988 struct iov_iter *i, unsigned long offset, size_t bytes)
990 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
991 if (unlikely(!page_copy_sane(page, offset, bytes))) {
992 kunmap_atomic(kaddr);
995 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
996 kunmap_atomic(kaddr);
1000 iterate_all_kinds(i, bytes, v,
1001 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1002 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1003 v.bv_offset, v.bv_len),
1004 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
1006 kunmap_atomic(kaddr);
1009 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
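
/*
 * Illustrative sketch, loosely following the generic_perform_write()
 * pattern: pre-fault the source while sleeping is still allowed, copy with
 * page faults implicitly disabled by the atomic kmap, then advance only by
 * what actually made it.  The helper name and the way the page was obtained
 * are hypothetical.
 */
static ssize_t example_fill_page(struct page *page, unsigned long offset,
				 size_t bytes, struct iov_iter *from)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(from, bytes)))
		return -EFAULT;

	copied = iov_iter_copy_from_user_atomic(page, from, offset, bytes);
	iov_iter_advance(from, copied);
	return copied;
}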
1011 static inline void pipe_truncate(struct iov_iter *i)
1013 struct pipe_inode_info *pipe = i->pipe;
1014 unsigned int p_tail = pipe->tail;
1015 unsigned int p_head = pipe->head;
1016 unsigned int p_mask = pipe->ring_size - 1;
1018 if (!pipe_empty(p_head, p_tail)) {
1019 struct pipe_buffer *buf;
1020 unsigned int i_head = i->head;
1021 size_t off = i->iov_offset;
1024 buf = &pipe->bufs[i_head & p_mask];
1025 buf->len = off - buf->offset;
1028 while (p_head != i_head) {
1030 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1033 pipe->head = p_head;
1037 static void pipe_advance(struct iov_iter *i, size_t size)
1039 struct pipe_inode_info *pipe = i->pipe;
1040 if (unlikely(i->count < size))
1043 struct pipe_buffer *buf;
1044 unsigned int p_mask = pipe->ring_size - 1;
1045 unsigned int i_head = i->head;
1046 size_t off = i->iov_offset, left = size;
1048 if (off) /* make it relative to the beginning of buffer */
1049 left += off - pipe->bufs[i_head & p_mask].offset;
1051 buf = &pipe->bufs[i_head & p_mask];
1052 if (left <= buf->len)
1058 i->iov_offset = buf->offset + left;
1061 /* ... and discard everything past that point */
1065 void iov_iter_advance(struct iov_iter *i, size_t size)
1067 if (unlikely(iov_iter_is_pipe(i))) {
1068 pipe_advance(i, size);
1071 if (unlikely(iov_iter_is_discard(i))) {
1075 iterate_and_advance(i, size, v, 0, 0, 0)
1077 EXPORT_SYMBOL(iov_iter_advance);
1079 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1083 if (WARN_ON(unroll > MAX_RW_COUNT))
1086 if (unlikely(iov_iter_is_pipe(i))) {
1087 struct pipe_inode_info *pipe = i->pipe;
1088 unsigned int p_mask = pipe->ring_size - 1;
1089 unsigned int i_head = i->head;
1090 size_t off = i->iov_offset;
1092 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1093 size_t n = off - b->offset;
1099 if (!unroll && i_head == i->start_head) {
1104 b = &pipe->bufs[i_head & p_mask];
1105 off = b->offset + b->len;
1107 i->iov_offset = off;
1112 if (unlikely(iov_iter_is_discard(i)))
1114 if (unroll <= i->iov_offset) {
1115 i->iov_offset -= unroll;
1118 unroll -= i->iov_offset;
1119 if (iov_iter_is_bvec(i)) {
1120 const struct bio_vec *bvec = i->bvec;
1122 size_t n = (--bvec)->bv_len;
1126 i->iov_offset = n - unroll;
1131 } else { /* same logics for iovec and kvec */
1132 const struct iovec *iov = i->iov;
1134 size_t n = (--iov)->iov_len;
1138 i->iov_offset = n - unroll;
1145 EXPORT_SYMBOL(iov_iter_revert);
1148 * Return the count of just the current iov_iter segment.
1150 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1152 if (unlikely(iov_iter_is_pipe(i)))
1153 return i->count; // it is a silly place, anyway
1154 if (i->nr_segs == 1)
1156 if (unlikely(iov_iter_is_discard(i)))
1158 else if (iov_iter_is_bvec(i))
1159 return min(i->count, i->bvec->bv_len - i->iov_offset);
1161 return min(i->count, i->iov->iov_len - i->iov_offset);
1163 EXPORT_SYMBOL(iov_iter_single_seg_count);
1165 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1166 const struct kvec *kvec, unsigned long nr_segs,
1169 WARN_ON(direction & ~(READ | WRITE));
1170 i->type = ITER_KVEC | (direction & (READ | WRITE));
1172 i->nr_segs = nr_segs;
1176 EXPORT_SYMBOL(iov_iter_kvec);
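
/*
 * Illustrative sketch: describe a plain kernel buffer to code that only
 * consumes an iov_iter (a ->read_iter()-style callee, for instance).  The
 * helper name is hypothetical.
 */
static void example_iter_from_kbuf(struct iov_iter *iter, struct kvec *kv,
				   void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	iov_iter_kvec(iter, READ, kv, 1, len);
}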
1178 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1179 const struct bio_vec *bvec, unsigned long nr_segs,
1182 WARN_ON(direction & ~(READ | WRITE));
1183 i->type = ITER_BVEC | (direction & (READ | WRITE));
1185 i->nr_segs = nr_segs;
1189 EXPORT_SYMBOL(iov_iter_bvec);
1191 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1192 struct pipe_inode_info *pipe,
1195 BUG_ON(direction != READ);
1196 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1197 i->type = ITER_PIPE | READ;
1199 i->head = pipe->head;
1202 i->start_head = i->head;
1204 EXPORT_SYMBOL(iov_iter_pipe);
1207 * iov_iter_discard - Initialise an I/O iterator that discards data
1208 * @i: The iterator to initialise.
1209 * @direction: The direction of the transfer.
1210 * @count: The size of the I/O buffer in bytes.
1212 * Set up an I/O iterator that just discards everything that's written to it.
1213 * It's only available as a READ iterator.
1215 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1217 BUG_ON(direction != READ);
1218 i->type = ITER_DISCARD | READ;
1222 EXPORT_SYMBOL(iov_iter_discard);
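
/*
 * Illustrative sketch: drain a number of bytes from a producer without
 * storing them anywhere, e.g. to skip over data the caller does not care
 * about.  The helper name and the 'fill' callback are assumptions of this
 * example.
 */
static size_t example_drain(size_t len,
			    size_t (*fill)(struct iov_iter *to, void *ctx),
			    void *ctx)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	return fill(&sink, ctx);
}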
1224 unsigned long iov_iter_alignment(const struct iov_iter *i)
1226 unsigned long res = 0;
1227 size_t size = i->count;
1229 if (unlikely(iov_iter_is_pipe(i))) {
1230 unsigned int p_mask = i->pipe->ring_size - 1;
1232 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1233 return size | i->iov_offset;
1236 iterate_all_kinds(i, size, v,
1237 (res |= (unsigned long)v.iov_base | v.iov_len, 0),
1238 res |= v.bv_offset | v.bv_len,
1239 res |= (unsigned long)v.iov_base | v.iov_len
1243 EXPORT_SYMBOL(iov_iter_alignment);
1245 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1247 unsigned long res = 0;
1248 size_t size = i->count;
1250 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1255 iterate_all_kinds(i, size, v,
1256 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1257 (size != v.iov_len ? size : 0), 0),
1258 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1259 (size != v.bv_len ? size : 0)),
1260 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1261 (size != v.iov_len ? size : 0))
1265 EXPORT_SYMBOL(iov_iter_gap_alignment);
1267 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1269 struct page **pages,
1273 struct pipe_inode_info *pipe = i->pipe;
1274 unsigned int p_mask = pipe->ring_size - 1;
1275 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1282 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1290 static ssize_t pipe_get_pages(struct iov_iter *i,
1291 struct page **pages, size_t maxsize, unsigned maxpages,
1294 unsigned int iter_head, npages;
1303 data_start(i, &iter_head, start);
1304 /* Amount of free space: some of this one + all after this one */
1305 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1306 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1308 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1311 ssize_t iov_iter_get_pages(struct iov_iter *i,
1312 struct page **pages, size_t maxsize, unsigned maxpages,
1315 if (maxsize > i->count)
1318 if (unlikely(iov_iter_is_pipe(i)))
1319 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1320 if (unlikely(iov_iter_is_discard(i)))
1323 iterate_all_kinds(i, maxsize, v, ({
1324 unsigned long addr = (unsigned long)v.iov_base;
1325 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1329 if (len > maxpages * PAGE_SIZE)
1330 len = maxpages * PAGE_SIZE;
1331 addr &= ~(PAGE_SIZE - 1);
1332 n = DIV_ROUND_UP(len, PAGE_SIZE);
1333 res = get_user_pages_fast(addr, n,
1334 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1336 if (unlikely(res < 0))
1338 return (res == n ? len : res * PAGE_SIZE) - *start;
1340 /* can't be more than PAGE_SIZE */
1341 *start = v.bv_offset;
1342 get_page(*pages = v.bv_page);
1350 EXPORT_SYMBOL(iov_iter_get_pages);
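
/*
 * Illustrative sketch: pin one batch of pages backing the iterator, as a
 * direct-I/O path would before building a bio.  The helper name is
 * hypothetical; the caller owns a reference on every returned page and must
 * put_page() them when done.
 */
static ssize_t example_pin_pages(struct iov_iter *i, size_t maxsize,
				 struct page **pages, unsigned int maxpages,
				 size_t *offset)
{
	ssize_t bytes = iov_iter_get_pages(i, pages, maxsize, maxpages, offset);

	if (bytes > 0)
		iov_iter_advance(i, bytes);
	return bytes;
}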
1352 static struct page **get_pages_array(size_t n)
1354 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1357 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1358 struct page ***pages, size_t maxsize,
1362 unsigned int iter_head, npages;
1371 data_start(i, &iter_head, start);
1372 /* Amount of free space: some of this one + all after this one */
1373 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1374 n = npages * PAGE_SIZE - *start;
1378 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1379 p = get_pages_array(npages);
1382 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1390 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1391 struct page ***pages, size_t maxsize,
1396 if (maxsize > i->count)
1399 if (unlikely(iov_iter_is_pipe(i)))
1400 return pipe_get_pages_alloc(i, pages, maxsize, start);
1401 if (unlikely(iov_iter_is_discard(i)))
1404 iterate_all_kinds(i, maxsize, v, ({
1405 unsigned long addr = (unsigned long)v.iov_base;
1406 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1410 addr &= ~(PAGE_SIZE - 1);
1411 n = DIV_ROUND_UP(len, PAGE_SIZE);
1412 p = get_pages_array(n);
1415 res = get_user_pages_fast(addr, n,
1416 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1417 if (unlikely(res < 0)) {
1422 return (res == n ? len : res * PAGE_SIZE) - *start;
1424 /* can't be more than PAGE_SIZE */
1425 *start = v.bv_offset;
1426 *pages = p = get_pages_array(1);
1429 get_page(*p = v.bv_page);
1437 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1439 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1446 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1450 iterate_and_advance(i, bytes, v, ({
1451 next = csum_and_copy_from_user(v.iov_base,
1452 (to += v.iov_len) - v.iov_len,
1455 sum = csum_block_add(sum, next, off);
1458 next ? 0 : v.iov_len;
1460 char *p = kmap_atomic(v.bv_page);
1461 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1462 p + v.bv_offset, v.bv_len,
1467 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1468 v.iov_base, v.iov_len,
1476 EXPORT_SYMBOL(csum_and_copy_from_iter);
1478 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1485 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1489 if (unlikely(i->count < bytes))
1491 iterate_all_kinds(i, bytes, v, ({
1492 next = csum_and_copy_from_user(v.iov_base,
1493 (to += v.iov_len) - v.iov_len,
1497 sum = csum_block_add(sum, next, off);
1501 char *p = kmap_atomic(v.bv_page);
1502 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1503 p + v.bv_offset, v.bv_len,
1508 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1509 v.iov_base, v.iov_len,
1515 iov_iter_advance(i, bytes);
1518 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1520 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
1523 const char *from = addr;
1524 __wsum *csum = csump;
1528 if (unlikely(iov_iter_is_pipe(i)))
1529 return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1532 if (unlikely(iov_iter_is_discard(i))) {
1533 WARN_ON(1); /* for now */
1536 iterate_and_advance(i, bytes, v, ({
1537 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1541 sum = csum_block_add(sum, next, off);
1544 next ? 0 : v.iov_len;
1546 char *p = kmap_atomic(v.bv_page);
1547 sum = csum_and_memcpy(p + v.bv_offset,
1548 (from += v.bv_len) - v.bv_len,
1549 v.bv_len, sum, off);
1553 sum = csum_and_memcpy(v.iov_base,
1554 (from += v.iov_len) - v.iov_len,
1555 v.iov_len, sum, off);
1562 EXPORT_SYMBOL(csum_and_copy_to_iter);
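
/*
 * Illustrative sketch of the networking-style use: copy a payload to the
 * iterator while folding its checksum into *sum.  The helper name and the
 * caller-seeded checksum are assumptions of this example.
 */
static size_t example_copy_and_csum(const void *data, size_t len,
				    __wsum *sum, struct iov_iter *to)
{
	return csum_and_copy_to_iter(data, len, sum, to);
}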
1564 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1567 #ifdef CONFIG_CRYPTO_HASH
1568 struct ahash_request *hash = hashp;
1569 struct scatterlist sg;
1572 copied = copy_to_iter(addr, bytes, i);
1573 sg_init_one(&sg, addr, copied);
1574 ahash_request_set_crypt(hash, &sg, NULL, copied);
1575 crypto_ahash_update(hash);
1581 EXPORT_SYMBOL(hash_and_copy_to_iter);
1583 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1585 size_t size = i->count;
1590 if (unlikely(iov_iter_is_discard(i)))
1593 if (unlikely(iov_iter_is_pipe(i))) {
1594 struct pipe_inode_info *pipe = i->pipe;
1595 unsigned int iter_head;
1601 data_start(i, &iter_head, &off);
1602 /* some of this one + all after this one */
1603 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1604 if (npages >= maxpages)
1606 } else iterate_all_kinds(i, size, v, ({
1607 unsigned long p = (unsigned long)v.iov_base;
1608 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1610 if (npages >= maxpages)
1614 if (npages >= maxpages)
1617 unsigned long p = (unsigned long)v.iov_base;
1618 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1620 if (npages >= maxpages)
1626 EXPORT_SYMBOL(iov_iter_npages);
1628 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1631 if (unlikely(iov_iter_is_pipe(new))) {
1635 if (unlikely(iov_iter_is_discard(new)))
1637 if (iov_iter_is_bvec(new))
1638 return new->bvec = kmemdup(new->bvec,
1639 new->nr_segs * sizeof(struct bio_vec),
1642 /* iovec and kvec have identical layout */
1643 return new->iov = kmemdup(new->iov,
1644 new->nr_segs * sizeof(struct iovec),
1647 EXPORT_SYMBOL(dup_iter);
1649 static int copy_compat_iovec_from_user(struct iovec *iov,
1650 const struct iovec __user *uvec, unsigned long nr_segs)
1652 const struct compat_iovec __user *uiov =
1653 (const struct compat_iovec __user *)uvec;
1654 int ret = -EFAULT, i;
1656 if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
1659 for (i = 0; i < nr_segs; i++) {
1663 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1664 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1666 /* check for compat_size_t not fitting in compat_ssize_t .. */
1671 iov[i].iov_base = compat_ptr(buf);
1672 iov[i].iov_len = len;
1681 static int copy_iovec_from_user(struct iovec *iov,
1682 const struct iovec __user *uvec, unsigned long nr_segs)
1686 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1688 for (seg = 0; seg < nr_segs; seg++) {
1689 if ((ssize_t)iov[seg].iov_len < 0)
1696 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1697 unsigned long nr_segs, unsigned long fast_segs,
1698 struct iovec *fast_iov, bool compat)
1700 struct iovec *iov = fast_iov;
1704 * SuS says "The readv() function *may* fail if the iovcnt argument was
1705 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1706 * traditionally returned zero for zero segments, so...
1710 if (nr_segs > UIO_MAXIOV)
1711 return ERR_PTR(-EINVAL);
1712 if (nr_segs > fast_segs) {
1713 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1715 return ERR_PTR(-ENOMEM);
1719 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1721 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1723 if (iov != fast_iov)
1725 return ERR_PTR(ret);
1731 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1732 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1733 struct iov_iter *i, bool compat)
1735 ssize_t total_len = 0;
1739 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1742 return PTR_ERR(iov);
1746 * According to the Single Unix Specification we should return EINVAL if
1747 * an element length is < 0 when cast to ssize_t or if the total length
1748 * would overflow the ssize_t return value of the system call.
1750 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1751 * overflow case.
1753 for (seg = 0; seg < nr_segs; seg++) {
1754 ssize_t len = (ssize_t)iov[seg].iov_len;
1756 if (!access_ok(iov[seg].iov_base, len)) {
1763 if (len > MAX_RW_COUNT - total_len) {
1764 len = MAX_RW_COUNT - total_len;
1765 iov[seg].iov_len = len;
1770 iov_iter_init(i, type, iov, nr_segs, total_len);
1779 * import_iovec() - Copy an array of &struct iovec from userspace
1780 * into the kernel, check that it is valid, and initialize a new
1781 * &struct iov_iter iterator to access it.
1783 * @type: One of %READ or %WRITE.
1784 * @uvec: Pointer to the userspace array.
1785 * @nr_segs: Number of elements in userspace array.
1786 * @fast_segs: Number of elements in *@iovp.
1787 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1788 * on-stack) kernel array.
1789 * @i: Pointer to iterator that will be initialized on success.
1791 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1792 * then this function places %NULL in *@iovp on return. Otherwise, a new
1793 * array will be allocated and the result placed in *@iovp. This means that
1794 * the caller may call kfree() on *@iovp regardless of whether the small
1795 * on-stack array was used or not (and regardless of whether this function
1796 * returns an error or not).
1798 * Return: Negative error code on error, bytes imported on success
1800 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1801 unsigned nr_segs, unsigned fast_segs,
1802 struct iovec **iovp, struct iov_iter *i)
1804 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1805 in_compat_syscall());
1807 EXPORT_SYMBOL(import_iovec);
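
/*
 * Illustrative sketch of the usual syscall-side pattern: import the user
 * iovec array (possibly into the on-stack fast array), run the I/O, then
 * free whatever import_iovec() may have allocated.  'do_example_io' and the
 * helper name are hypothetical.
 */
static ssize_t example_vectored_read(const struct iovec __user *uvec,
				     unsigned long nr_segs,
				     ssize_t (*do_example_io)(struct iov_iter *))
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = do_example_io(&iter);
	kfree(iov);	/* NULL (a no-op) if the on-stack array was used */
	return ret;
}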
1809 int import_single_range(int rw, void __user *buf, size_t len,
1810 struct iovec *iov, struct iov_iter *i)
1812 if (len > MAX_RW_COUNT)
1814 if (unlikely(!access_ok(buf, len)))
1817 iov->iov_base = buf;
1819 iov_iter_init(i, rw, iov, 1, len);
1822 EXPORT_SYMBOL(import_single_range);
1824 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
1825 int (*f)(struct kvec *vec, void *context),
1833 iterate_all_kinds(i, bytes, v, -EINVAL, ({
1834 w.iov_base = kmap(v.bv_page) + v.bv_offset;
1835 w.iov_len = v.bv_len;
1836 err = f(&w, context);
1840 err = f(&w, context);})
1844 EXPORT_SYMBOL(iov_iter_for_each_range);