// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

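/*
 * Note on the iterate_* macros below: STEP is an expression that consumes
 * (base, len) and evaluates to the number of bytes it did *not* process
 * (0 on complete success), so each iterator flavour can stop early on a
 * partial copy and leave i->iov_offset and the segment cursor pointing at
 * the first unprocessed byte.
 */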
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct page *head = NULL;                               \
        loff_t start = i->xarray_start + i->iov_offset;         \
        unsigned offset = start % PAGE_SIZE;                    \
        pgoff_t index = start / PAGE_SIZE;                      \
        int j;                                                  \
                                                                \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, head, ULONG_MAX) {                   \
                unsigned left;                                  \
                if (xas_retry(&xas, head))                      \
                        continue;                               \
                if (WARN_ON(xa_is_value(head)))                 \
                        break;                                  \
                if (WARN_ON(PageHuge(head)))                    \
                        break;                                  \
                for (j = (head->index < index) ? index - head->index : 0; \
                     j < thp_nr_pages(head); j++) {             \
                        void *kaddr = kmap_local_page(head + j);        \
                        base = kaddr + offset;                  \
                        len = PAGE_SIZE - offset;               \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(kaddr);                    \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset = 0;                             \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_iovec(i))) {                 \
                        const struct iovec *iov = i->iov;       \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user(to, from, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[i_head & p_mask];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head = i->head;
        size_t off;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        buf = &pipe->bufs[i_head & p_mask];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                i_head++;
                buf = &pipe->bufs[i_head & p_mask];
        }
        if (pipe_full(i_head, p_tail, pipe->max_usage))
                return 0;

        buf->ops = &page_cache_pipe_buf_ops;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = bytes;

        pipe->head = i_head + 1;
        i->iov_offset = offset + bytes;
        i->head = i_head;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns 0 on success, or non-zero if the memory could not be accessed
 * (e.g. because it is an invalid address).
 */
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
{
        if (iter_is_iovec(i)) {
                const struct iovec *p;
                size_t skip;

                if (bytes > i->count)
                        bytes = i->count;
                for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
                        size_t len = min(bytes, p->iov_len - skip);
                        int err;

                        if (unlikely(!len))
                                continue;
                        err = fault_in_pages_readable(p->iov_base + skip, len);
                        if (unlikely(err))
                                return err;
                        bytes -= len;
                }
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
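/*
 * Illustrative sketch (not part of this file): the buffered write path
 * typically faults the user pages in before taking page-cache locks, so
 * that a later atomic copy from the same buffer cannot deadlock on a page
 * fault while a page is locked.  Roughly:
 *
 *      if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
 *              status = -EFAULT;
 *              break;
 *      }
 *      ...
 *      copied = copy_page_from_iter_atomic(page, offset, bytes, from);
 */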

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        WARN_ON_ONCE(uaccess_kernel());
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .data_source = direction,
                .iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);
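/*
 * Illustrative sketch (not part of this file): wrapping a single user
 * buffer in an iterator.  Direction is WRITE when the buffer is the data
 * source and READ when it is the destination.  'ubuf', 'kbuf' and 'len'
 * are hypothetical:
 *
 *      struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, WRITE, &iov, 1, len);
 *      copied = copy_from_iter(kbuf, len, &iter);
 */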

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
                              unsigned int *iter_headp, size_t *offp)
{
        unsigned int p_mask = i->pipe->ring_size - 1;
        unsigned int iter_head = i->head;
        size_t off = i->iov_offset;

        if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
                    off == PAGE_SIZE)) {
                iter_head++;
                off = 0;
        }
        *iter_headp = iter_head;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *iter_headp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int iter_head;
        size_t off;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &iter_head, &off);
        *iter_headp = iter_head;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[iter_head & p_mask].len += size;
                        return size;
                }
                pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
                iter_head++;
        }
        while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
                struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;

                buf->ops = &default_pipe_buf_ops;
                buf->page = page;
                buf->offset = 0;
                buf->len = min_t(ssize_t, left, PAGE_SIZE);
                left -= buf->len;
                iter_head++;
                pipe->head = iter_head;

                if (left == 0)
                        return size;
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int i_head;
        size_t r;

        if (!sanity(i))
                return 0;

        bytes = push_pipe(i, bytes, &i_head, &r);
        while (bytes) {
                size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = r + chunk;
                bytes -= chunk;
                off += chunk;
                r = 0;
                i_head++;
        }
        *sump = sum;
        i->count -= off;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
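/*
 * Illustrative sketch (not part of this file): _copy_to_iter() returns the
 * number of bytes actually copied, which may be short if a user fault
 * occurs, so callers check the return value rather than assuming
 * completion:
 *
 *      copied = _copy_to_iter(kbuf, len, &iter);
 *      if (copied != len)
 *              return -EFAULT;
 */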

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off, xfer = 0;

        if (!sanity(i))
                return 0;

        n = push_pipe(i, bytes, &i_head, &off);
        while (n) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                unsigned long rem;
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = off + chunk;
                xfer += chunk;
                if (rem)
                        break;
                n -= chunk;
                off = 0;
                i_head++;
        }
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
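/*
 * Illustrative sketch (not part of this file): the pmem driver wires this
 * up as its dax_operations ->copy_to_iter() hook, roughly:
 *
 *      static size_t pmem_copy_to_iter(struct dax_device *dax_dev,
 *                      pgoff_t pgoff, void *addr, size_t bytes,
 *                      struct iov_iter *i)
 *      {
 *              return _copy_mc_to_iter(addr, bytes, i);
 *      }
 */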

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() to ensure that writes to persistent memory
 * are flushed through the CPU cache. It differs from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
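/*
 * Illustrative sketch (not part of this file): pmem similarly uses this as
 * its dax ->copy_from_iter() hook so that data written via DAX is durable
 * once a subsequent fence completes, roughly:
 *
 *      return _copy_from_iter_flushcache(addr, bytes, i);
 */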

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (likely(iter_is_iovec(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
        if (iov_iter_is_pipe(i))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        if (unlikely(iov_iter_is_discard(i))) {
                if (unlikely(i->count < bytes))
                        bytes = i->count;
                i->count -= bytes;
                return bytes;
        }
        WARN_ON(1);
        return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                size_t n = __copy_page_to_iter(page, offset,
                                min(bytes, (size_t)PAGE_SIZE - offset), i);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
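/*
 * Illustrative sketch (not part of this file): the buffered read path
 * copies each up-to-date page-cache page to the destination roughly like
 * this; 'page', 'offset' and 'already_read' are hypothetical:
 *
 *      copied = copy_page_to_iter(page, offset, bytes, iter);
 *      already_read += copied;
 *      if (copied < bytes)
 *              return already_read ? already_read : -EFAULT;
 */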

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (likely(iter_is_iovec(i)))
                return copy_page_from_iter_iovec(page, offset, bytes, i);
        if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
        WARN_ON(1);
        return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;

        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy(p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_head = pipe->head;
        unsigned int p_mask = pipe->ring_size - 1;

        if (!pipe_empty(p_head, p_tail)) {
                struct pipe_buffer *buf;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;

                if (off) {
                        buf = &pipe->bufs[i_head & p_mask];
                        buf->len = off - buf->offset;
                        i_head++;
                }
                while (p_head != i_head) {
                        p_head--;
                        pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
                }

                pipe->head = p_head;
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (size) {
                struct pipe_buffer *buf;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset, left = size;

                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[i_head & p_mask].offset;
                while (1) {
                        buf = &pipe->bufs[i_head & p_mask];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        i_head++;
                }
                i->head = i_head;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        struct bvec_iter bi;

        bi.bi_size = i->count;
        bi.bi_bvec_done = i->iov_offset;
        bi.bi_idx = 0;
        bvec_iter_advance(i->bvec, &bi, size);

        i->bvec += bi.bi_idx;
        i->nr_segs -= bi.bi_idx;
        i->count = bi.bi_size;
        i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);
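/*
 * Illustrative sketch (not part of this file): callers that consume part
 * of an iterator without using one of the copy helpers advance it
 * manually, e.g. to skip 'hdr_len' bytes of header before the payload
 * copy; names are hypothetical:
 *
 *      iov_iter_advance(&iter, hdr_len);
 *      copied = copy_from_iter(payload, payload_len, &iter);
 */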

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;
                while (1) {
                        struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
                        size_t n = off - b->offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && i_head == i->start_head) {
                                off = 0;
                                break;
                        }
                        i_head--;
                        b = &pipe->bufs[i_head & p_mask];
                        off = b->offset + b->len;
                }
                i->iov_offset = off;
                i->head = i_head;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
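/*
 * Illustrative sketch (not part of this file): a caller that hands the
 * iterator to an operation which may fail after partially consuming it
 * can rewind by the amount actually consumed; 'some_op' is hypothetical:
 *
 *      size_t before = iov_iter_count(&iter);
 *
 *      ret = some_op(&iter);
 *      if (ret < 0)
 *              iov_iter_revert(&iter, before - iov_iter_count(&iter));
 */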

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, i->iov->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        *i = (struct iov_iter){
                .iter_type = ITER_PIPE,
                .data_source = false,
                .pipe = pipe,
                .head = pipe->head,
                .start_head = pipe->head,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller must prevent the
 * pages from being evicted or released for the duration, either by taking a
 * ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_xarray);
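/*
 * Illustrative sketch (not part of this file): a network filesystem that
 * has locked a run of pages in the page cache can describe them to its
 * transport with an xarray iterator; 'transport_read' is hypothetical:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *      ret = transport_read(&iter);
 */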

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                .data_source = false,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_discard);
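/*
 * Illustrative sketch (not part of this file): draining 'len' bytes that a
 * protocol must consume but the caller does not want to keep;
 * 'receive_into_iter' is hypothetical:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_discard(&iter, READ, len);
 *      ret = receive_into_iter(&iter, len);
 */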

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;
                if (len) {
                        res |= (unsigned long)i->iov[k].iov_base + skip;
                        if (len > size)
                                len = size;
                        res |= len;
                        size -= len;
                        if (!size)
                                break;
                }
        }
        return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
        unsigned res = 0;
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                if (len > size)
                        len = size;
                res |= len;
                size -= len;
                if (!size)
                        break;
        }
        return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_alignment_iovec(i);

        if (iov_iter_is_bvec(i))
                return iov_iter_alignment_bvec(i);

        if (iov_iter_is_pipe(i)) {
                unsigned int p_mask = i->pipe->ring_size - 1;
                size_t size = i->count;

                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
                        return size | i->iov_offset;
                return size;
        }

        if (iov_iter_is_xarray(i))
                return (i->xarray_start + i->iov_offset) | i->count;

        return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
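/*
 * Illustrative sketch (not part of this file): direct I/O code typically
 * rejects requests whose addresses or lengths do not meet the device's
 * alignment requirement; 'blocksize_mask' is hypothetical:
 *
 *      if (iov_iter_alignment(iter) & blocksize_mask)
 *              return -EINVAL;
 */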
1287
1288 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1289 {
1290         unsigned long res = 0;
1291         unsigned long v = 0;
1292         size_t size = i->count;
1293         unsigned k;
1294
1295         if (WARN_ON(!iter_is_iovec(i)))
1296                 return ~0U;
1297
1298         for (k = 0; k < i->nr_segs; k++) {
1299                 if (i->iov[k].iov_len) {
1300                         unsigned long base = (unsigned long)i->iov[k].iov_base;
1301                         if (v) // if not the first one
1302                                 res |= base | v; // this start | previous end
1303                         v = base + i->iov[k].iov_len;
1304                         if (size <= i->iov[k].iov_len)
1305                                 break;
1306                         size -= i->iov[k].iov_len;
1307                 }
1308         }
1309         return res;
1310 }
1311 EXPORT_SYMBOL(iov_iter_gap_alignment);
1312
1313 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1314                                 size_t maxsize,
1315                                 struct page **pages,
1316                                 int iter_head,
1317                                 size_t *start)
1318 {
1319         struct pipe_inode_info *pipe = i->pipe;
1320         unsigned int p_mask = pipe->ring_size - 1;
1321         ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1322         if (!n)
1323                 return -EFAULT;
1324
1325         maxsize = n;
1326         n += *start;
1327         while (n > 0) {
1328                 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1329                 iter_head++;
1330                 n -= PAGE_SIZE;
1331         }
1332
1333         return maxsize;
1334 }
1335
1336 static ssize_t pipe_get_pages(struct iov_iter *i,
1337                    struct page **pages, size_t maxsize, unsigned maxpages,
1338                    size_t *start)
1339 {
1340         unsigned int iter_head, npages;
1341         size_t capacity;
1342
1343         if (!sanity(i))
1344                 return -EFAULT;
1345
1346         data_start(i, &iter_head, start);
1347         /* Amount of free space: some of this one + all after this one */
1348         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1349         capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1350
1351         return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1352 }
1353
1354 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1355                                           pgoff_t index, unsigned int nr_pages)
1356 {
1357         XA_STATE(xas, xa, index);
1358         struct page *page;
1359         unsigned int ret = 0;
1360
1361         rcu_read_lock();
1362         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1363                 if (xas_retry(&xas, page))
1364                         continue;
1365
1366                 /* Has the page moved or been split? */
1367                 if (unlikely(page != xas_reload(&xas))) {
1368                         xas_reset(&xas);
1369                         continue;
1370                 }
1371
1372                 pages[ret] = find_subpage(page, xas.xa_index);
1373                 get_page(pages[ret]);
1374                 if (++ret == nr_pages)
1375                         break;
1376         }
1377         rcu_read_unlock();
1378         return ret;
1379 }
1380
1381 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1382                                      struct page **pages, size_t maxsize,
1383                                      unsigned maxpages, size_t *_start_offset)
1384 {
1385         unsigned nr, offset;
1386         pgoff_t index, count;
1387         size_t size = maxsize, actual;
1388         loff_t pos;
1389
1390         if (!size || !maxpages)
1391                 return 0;
1392
1393         pos = i->xarray_start + i->iov_offset;
1394         index = pos >> PAGE_SHIFT;
1395         offset = pos & ~PAGE_MASK;
1396         *_start_offset = offset;
1397
1398         count = 1;
1399         if (size > PAGE_SIZE - offset) {
1400                 size -= PAGE_SIZE - offset;
1401                 count += size >> PAGE_SHIFT;
1402                 size &= ~PAGE_MASK;
1403                 if (size)
1404                         count++;
1405         }
1406
1407         if (count > maxpages)
1408                 count = maxpages;
1409
1410         nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1411         if (nr == 0)
1412                 return 0;
1413
1414         actual = PAGE_SIZE * nr;
1415         actual -= offset;
1416         if (nr == count && size > 0) {
1417                 unsigned last_offset = (nr > 1) ? 0 : offset;
1418                 actual -= PAGE_SIZE - (last_offset + size);
1419         }
1420         return actual;
1421 }
1422
1423 /* must be done on non-empty ITER_IOVEC one */
1424 static unsigned long first_iovec_segment(const struct iov_iter *i,
1425                                          size_t *size, size_t *start,
1426                                          size_t maxsize, unsigned maxpages)
1427 {
1428         size_t skip;
1429         long k;
1430
1431         for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1432                 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1433                 size_t len = i->iov[k].iov_len - skip;
1434
1435                 if (unlikely(!len))
1436                         continue;
1437                 if (len > maxsize)
1438                         len = maxsize;
1439                 len += (*start = addr % PAGE_SIZE);
1440                 if (len > maxpages * PAGE_SIZE)
1441                         len = maxpages * PAGE_SIZE;
1442                 *size = len;
1443                 return addr & PAGE_MASK;
1444         }
1445         BUG(); // if it had been empty, we wouldn't get called
1446 }

/* must only be called with a non-empty ITER_BVEC */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize, unsigned maxpages)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	len += (*start = skip % PAGE_SIZE);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned long addr;

		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		while (n--)
			get_page(*pages++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
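
/*
 * Minimal usage sketch for iov_iter_get_pages() (hypothetical caller,
 * error handling abbreviated).  Data begins at offset 'start' within
 * pages[0]; every returned page holds a reference that must be dropped
 * with put_page():
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t bytes = iov_iter_get_pages(iter, pages, iov_iter_count(iter),
 *					   16, &start);
 *
 *	if (bytes > 0) {
 *		int n = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
 *		... operate on the pinned pages ...
 *		while (n--)
 *			put_page(pages[n]);
 *	}
 */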

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned long addr;

		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		while (n--)
			get_page(*p++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
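
/*
 * The _alloc variant sizes and allocates the page array itself; on a
 * positive return the caller owns *pages and must kvfree() it after
 * dropping the page references.  Hypothetical caller sketch, error
 * handling abbreviated:
 *
 *	struct page **pages;
 *	size_t start;
 *	ssize_t bytes = iov_iter_get_pages_alloc(iter, &pages,
 *						 iov_iter_count(iter), &start);
 *
 *	if (bytes > 0) {
 *		int n = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
 *		... use the pages ...
 *		while (n--)
 *			put_page(pages[n]);
 *		kvfree(pages);
 *	}
 */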

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

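/*
 * Note on csum_shift() in the function below: a 16-bit ones' complement
 * sum is byte-order sensitive, so a checksum accumulated starting at an
 * odd offset has its bytes swapped relative to one accumulated at an
 * even offset.  Shifting the running sum by csstate->off on entry and
 * again on exit keeps the folded checksum correct when successive calls
 * land on odd boundaries.
 */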
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

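/*
 * hash_and_copy_to_iter() feeds whatever was actually copied into the
 * caller-supplied ahash request through a one-entry scatterlist.  With
 * CONFIG_CRYPTO_HASH disabled it is a stub returning 0, which callers
 * observe as a short copy.
 */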
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}
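
/*
 * In both iov_npages() and bvec_npages(), DIV_ROUND_UP(offs + len, PAGE_SIZE)
 * counts the pages touched by a segment that begins offs bytes into its
 * first page.  For example (assuming PAGE_SIZE == 4096), offs == 0xf00 and
 * len == 0x200 straddle one page boundary: DIV_ROUND_UP(0x1100, 0x1000) == 2.
 */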

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

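/*
 * copy_compat_iovec_from_user() relies on the user_access_begin() /
 * unsafe_get_user() pattern: one user_access_begin() check opens a
 * user-access window for the whole array, each unsafe_get_user() branches
 * to the uaccess_end label on fault, and user_access_end() closes the
 * window on every exit path.
 */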
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for a compat_size_t that does not fit in compat_ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in the array pointed to by *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success.
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
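
/*
 * Usage sketch for import_iovec() (hypothetical caller mirroring the
 * readv()-style pattern, error handling abbreviated):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... consume the iterator ...
 *	kfree(iov);	-- safe whether or not iovstack was used
 */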

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
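
/*
 * Usage sketch for import_single_range() (hypothetical caller covering
 * the common single-buffer path; the resulting iterator spans at most
 * MAX_RW_COUNT bytes of the user buffer):
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(READ, ubuf, len, &iov, &iter);
 *
 *	if (ret)
 *		return ret;
 */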