lib/iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                if (!__v.bv_len)                        \
                        continue;                       \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        if (likely(n)) {                                        \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                }                                               \
        }                                                       \
}

#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                        skip += n;                              \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}

static int copyout(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (access_ok(from, n)) {
                instrument_copy_from_user(to, from, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[i_head & p_mask];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head = i->head;
        size_t off;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        buf = &pipe->bufs[i_head & p_mask];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                i_head++;
                buf = &pipe->bufs[i_head & p_mask];
        }
        if (pipe_full(i_head, p_tail, pipe->max_usage))
                return 0;

        buf->ops = &page_cache_pipe_buf_ops;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = bytes;

        pipe->head = i_head + 1;
        i->iov_offset = offset + bytes;
        i->head = i_head;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
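
/*
 * Illustrative usage sketch (not part of the original file): write
 * paths in the style of generic_perform_write() fault the source in
 * up front so that a later copy taken under a page lock is unlikely
 * to fault.  example_copy_in() and its policy of failing hard on an
 * inaccessible range are assumptions, not kernel API.
 */
static inline ssize_t example_copy_in(void *dst, size_t bytes,
                                      struct iov_iter *from)
{
        /* pre-fault the user pages backing the next `bytes` */
        if (unlikely(iov_iter_fault_in_readable(from, bytes)))
                return -EFAULT;
        /* may still come up short if a page is unmapped concurrently */
        return copy_from_iter(dst, bytes, from);
}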

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        direction &= READ | WRITE;

        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
                i->type = ITER_KVEC | direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = ITER_IOVEC | direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
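
/*
 * Sketch, for illustration only: building a READ iterator over one
 * user buffer, as read(2)-style callers do.  The helper name and the
 * single-segment setup are hypothetical; iov_iter_init() is the only
 * API used.
 */
static inline void example_iter_from_user_buf(struct iov_iter *it,
                                              struct iovec *iv,
                                              void __user *buf, size_t len)
{
        iv->iov_base = buf;
        iv->iov_len = len;
        /* READ: data will flow from the kernel into `buf` */
        iov_iter_init(it, READ, iv, 1, len);
}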

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
                              unsigned int *iter_headp, size_t *offp)
{
        unsigned int p_mask = i->pipe->ring_size - 1;
        unsigned int iter_head = i->head;
        size_t off = i->iov_offset;

        if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
                    off == PAGE_SIZE)) {
                iter_head++;
                off = 0;
        }
        *iter_headp = iter_head;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *iter_headp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int iter_head;
        size_t off;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &iter_head, &off);
        *iter_headp = iter_head;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[iter_head & p_mask].len += size;
                        return size;
                }
                pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
                iter_head++;
        }
        while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
                struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;

                buf->ops = &default_pipe_buf_ops;
                buf->page = page;
                buf->offset = 0;
                buf->len = min_t(ssize_t, left, PAGE_SIZE);
                left -= buf->len;
                iter_head++;
                pipe->head = iter_head;

                if (left == 0)
                        return size;
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                __wsum *csum, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, r;
        size_t off = 0;
        __wsum sum = *csum;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &r);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
                char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
                sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
                kunmap_atomic(p);
                i->head = i_head;
                i->iov_offset = r + chunk;
                n -= chunk;
                off += chunk;
                addr += chunk;
                r = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        *csum = sum;
        return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
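
/*
 * Caller-side sketch (hypothetical helper): copy_to_iter() returns
 * the number of bytes actually transferred, which can be short for
 * ITER_IOVEC when a user page faults, so callers compare against the
 * requested length.  Treating any short copy as -EFAULT is one common
 * policy, not a rule.
 */
static inline ssize_t example_emit(const void *kbuf, size_t len,
                                   struct iov_iter *to)
{
        size_t copied = copy_to_iter(kbuf, len, to);

        return copied == len ? (ssize_t)copied : -EFAULT;
}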

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
{
        unsigned long ret;
        char *to;

        to = kmap_atomic(page);
        ret = copy_mc_to_kernel(to + offset, from, len);
        kunmap_atomic(to);

        return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off, xfer = 0;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                unsigned long rem;

                rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
                                            off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk - rem;
                xfer += chunk - rem;
                if (rem)
                        break;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
                           v.iov_len),
                ({
                rem = copy_mc_to_page(v.bv_page, v.bv_offset,
                                      (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                }),
                ({
                rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
                                        - v.iov_len, v.iov_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                })
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        if (iter_is_iovec(i))
                might_fault();
        iterate_all_kinds(i, bytes, v, ({
                if (copyin((to += v.iov_len) - v.iov_len,
                                      v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
                        v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (unlikely(iov_iter_is_discard(i)))
                return bytes;
        else if (likely(!iov_iter_is_pipe(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
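
/*
 * Sketch, assuming a page-cache read path: send part of a page to
 * whatever the iterator describes and let the caller stop on a short
 * return (destination faulted, or the pipe filled up).  The helper
 * name is illustrative.
 */
static inline size_t example_send_page(struct page *page, size_t off,
                                       size_t len, struct iov_iter *to)
{
        /* copy_page_to_iter() advances `to` by the bytes it copied */
        return copy_page_to_iter(page, off, len, to);
}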

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;

        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_head = pipe->head;
        unsigned int p_mask = pipe->ring_size - 1;

        if (!pipe_empty(p_head, p_tail)) {
                struct pipe_buffer *buf;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;

                if (off) {
                        buf = &pipe->bufs[i_head & p_mask];
                        buf->len = off - buf->offset;
                        i_head++;
                }
                while (p_head != i_head) {
                        p_head--;
                        pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
                }

                pipe->head = p_head;
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset, left = size;

                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[i_head & p_mask].offset;
                while (1) {
                        buf = &pipe->bufs[i_head & p_mask];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        i_head++;
                }
                i->head = i_head;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                pipe_advance(i, size);
                return;
        }
        if (unlikely(iov_iter_is_discard(i))) {
                i->count -= size;
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;
                while (1) {
                        struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
                        size_t n = off - b->offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && i_head == i->start_head) {
                                off = 0;
                                break;
                        }
                        i_head--;
                        b = &pipe->bufs[i_head & p_mask];
                        off = b->offset + b->len;
                }
                i->iov_offset = off;
                i->head = i_head;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logics for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
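
/*
 * Sketch of the advance/revert pairing (hypothetical bookkeeping): a
 * caller that let a callee consume the iterator can rewind to match
 * the bytes actually accounted for, using only byte counts.
 */
static inline void example_rewind(struct iov_iter *i, size_t consumed,
                                  size_t written)
{
        if (consumed > written)
                iov_iter_revert(i, consumed - written);
}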

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        if (unlikely(iov_iter_is_discard(i)))
                return i->count;
        else if (iov_iter_is_bvec(i))
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_KVEC | (direction & (READ | WRITE));
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_BVEC | (direction & (READ | WRITE));
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
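
/*
 * Sketch: wrapping a kernel buffer in a single-segment ITER_KVEC,
 * as kernel_read()-style in-kernel I/O does.  The helper and its
 * names are illustrative; iov_iter_kvec() is the only API used.
 */
static inline void example_kvec_iter(struct iov_iter *it, struct kvec *kv,
                                     void *buf, size_t len)
{
        kv->iov_base = buf;
        kv->iov_len = len;
        iov_iter_kvec(it, READ, kv, 1, len);
}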

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        i->type = ITER_PIPE | READ;
        i->pipe = pipe;
        i->head = pipe->head;
        i->iov_offset = 0;
        i->count = count;
        i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        i->type = ITER_DISCARD | READ;
        i->count = count;
        i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
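
/*
 * Sketch: a discard iterator drains bytes through the normal copy
 * paths without storing them, e.g. to skip over unwanted data in a
 * ->read_iter() implementation.  The drain helper is hypothetical.
 */
static inline void example_drain(struct iov_iter *it, size_t count)
{
        iov_iter_discard(it, READ, count);
        /* any copy_to_iter() into `it` now only decrements it->count */
}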

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i))) {
                unsigned int p_mask = i->pipe->ring_size - 1;

                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return ~0U;
        }

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int iter_head,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        ssize_t n = push_pipe(i, maxsize, &iter_head, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
                iter_head++;
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned int iter_head, npages;
        size_t capacity;

        if (!maxsize)
                return 0;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &iter_head, start);
        /* Amount of free space: some of this one + all after this one */
        npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        if (unlikely(iov_iter_is_discard(i)))
                return -EFAULT;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n,
                                iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
                                pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
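
/*
 * Direct-I/O-style sketch (assumptions: the caller supplies a large
 * enough `pages` array and wants to advance past what it pinned).
 * iov_iter_get_pages() itself does not advance the iterator.
 */
static inline ssize_t example_pin_pages(struct iov_iter *i,
                                        struct page **pages,
                                        unsigned int maxpages)
{
        size_t off;
        ssize_t n = iov_iter_get_pages(i, pages, SIZE_MAX, maxpages, &off);

        if (n > 0)
                iov_iter_advance(i, n);
        return n;
}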
1351
1352 static struct page **get_pages_array(size_t n)
1353 {
1354         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1355 }
1356
1357 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1358                    struct page ***pages, size_t maxsize,
1359                    size_t *start)
1360 {
1361         struct page **p;
1362         unsigned int iter_head, npages;
1363         ssize_t n;
1364
1365         if (!maxsize)
1366                 return 0;
1367
1368         if (!sanity(i))
1369                 return -EFAULT;
1370
1371         data_start(i, &iter_head, start);
1372         /* Amount of free space: some of this one + all after this one */
1373         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1374         n = npages * PAGE_SIZE - *start;
1375         if (maxsize > n)
1376                 maxsize = n;
1377         else
1378                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1379         p = get_pages_array(npages);
1380         if (!p)
1381                 return -ENOMEM;
1382         n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1383         if (n > 0)
1384                 *pages = p;
1385         else
1386                 kvfree(p);
1387         return n;
1388 }
1389
1390 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1391                    struct page ***pages, size_t maxsize,
1392                    size_t *start)
1393 {
1394         struct page **p;
1395
1396         if (maxsize > i->count)
1397                 maxsize = i->count;
1398
1399         if (unlikely(iov_iter_is_pipe(i)))
1400                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1401         if (unlikely(iov_iter_is_discard(i)))
1402                 return -EFAULT;
1403
1404         iterate_all_kinds(i, maxsize, v, ({
1405                 unsigned long addr = (unsigned long)v.iov_base;
1406                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1407                 int n;
1408                 int res;
1409
1410                 addr &= ~(PAGE_SIZE - 1);
1411                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1412                 p = get_pages_array(n);
1413                 if (!p)
1414                         return -ENOMEM;
1415                 res = get_user_pages_fast(addr, n,
1416                                 iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
1417                 if (unlikely(res < 0)) {
1418                         kvfree(p);
1419                         return res;
1420                 }
1421                 *pages = p;
1422                 return (res == n ? len : res * PAGE_SIZE) - *start;
1423         0;}),({
1424                 /* can't be more than PAGE_SIZE */
1425                 *start = v.bv_offset;
1426                 *pages = p = get_pages_array(1);
1427                 if (!p)
1428                         return -ENOMEM;
1429                 get_page(*p = v.bv_page);
1430                 return v.bv_len;
1431         }),({
1432                 return -EFAULT;
1433         })
1434         )
1435         return 0;
1436 }
1437 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1438
1439 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1440                                struct iov_iter *i)
1441 {
1442         char *to = addr;
1443         __wsum sum, next;
1444         size_t off = 0;
1445         sum = *csum;
1446         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1447                 WARN_ON(1);
1448                 return 0;
1449         }
1450         iterate_and_advance(i, bytes, v, ({
1451                 next = csum_and_copy_from_user(v.iov_base,
1452                                                (to += v.iov_len) - v.iov_len,
1453                                                v.iov_len);
1454                 if (next) {
1455                         sum = csum_block_add(sum, next, off);
1456                         off += v.iov_len;
1457                 }
1458                 next ? 0 : v.iov_len;
1459         }), ({
1460                 char *p = kmap_atomic(v.bv_page);
1461                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1462                                       p + v.bv_offset, v.bv_len,
1463                                       sum, off);
1464                 kunmap_atomic(p);
1465                 off += v.bv_len;
1466         }),({
1467                 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1468                                       v.iov_base, v.iov_len,
1469                                       sum, off);
1470                 off += v.iov_len;
1471         })
1472         )
1473         *csum = sum;
1474         return bytes;
1475 }
1476 EXPORT_SYMBOL(csum_and_copy_from_iter);
1477
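/*
 * All-or-nothing variant of csum_and_copy_from_iter(): either all of
 * @bytes is copied and checksummed and the iterator advanced (returns
 * true), or nothing is consumed at all (returns false).  The iterator is
 * only advanced once the entire copy has succeeded.
 */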
1478 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1479                                struct iov_iter *i)
1480 {
1481         char *to = addr;
1482         __wsum sum, next;
1483         size_t off = 0;
1484         sum = *csum;
1485         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1486                 WARN_ON(1);
1487                 return false;
1488         }
1489         if (unlikely(i->count < bytes))
1490                 return false;
1491         iterate_all_kinds(i, bytes, v, ({
1492                 next = csum_and_copy_from_user(v.iov_base,
1493                                                (to += v.iov_len) - v.iov_len,
1494                                                v.iov_len);
1495                 if (!next)
1496                         return false;
1497                 sum = csum_block_add(sum, next, off);
1498                 off += v.iov_len;
1499                 0;
1500         }), ({
1501                 char *p = kmap_atomic(v.bv_page);
1502                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1503                                       p + v.bv_offset, v.bv_len,
1504                                       sum, off);
1505                 kunmap_atomic(p);
1506                 off += v.bv_len;
1507         }),({
1508                 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1509                                       v.iov_base, v.iov_len,
1510                                       sum, off);
1511                 off += v.iov_len;
1512         })
1513         )
1514         *csum = sum;
1515         iov_iter_advance(i, bytes);
1516         return true;
1517 }
1518 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1519
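/*
 * Mirror image of csum_and_copy_from_iter(): copy @bytes from @addr into
 * @i, accumulating the checksum of the copied data into *@csump.  Pipes
 * get their own path via csum_and_copy_to_pipe_iter(); discard iterators
 * are rejected.
 */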
1520 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
1521                              struct iov_iter *i)
1522 {
1523         const char *from = addr;
1524         __wsum *csum = csump;
1525         __wsum sum, next;
1526         size_t off = 0;
1527
1528         if (unlikely(iov_iter_is_pipe(i)))
1529                 return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1530
1531         sum = *csum;
1532         if (unlikely(iov_iter_is_discard(i))) {
1533                 WARN_ON(1);     /* for now */
1534                 return 0;
1535         }
1536         iterate_and_advance(i, bytes, v, ({
1537                 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1538                                              v.iov_base,
1539                                              v.iov_len);
1540                 if (next) {
1541                         sum = csum_block_add(sum, next, off);
1542                         off += v.iov_len;
1543                 }
1544                 next ? 0 : v.iov_len;
1545         }), ({
1546                 char *p = kmap_atomic(v.bv_page);
1547                 sum = csum_and_memcpy(p + v.bv_offset,
1548                                       (from += v.bv_len) - v.bv_len,
1549                                       v.bv_len, sum, off);
1550                 kunmap_atomic(p);
1551                 off += v.bv_len;
1552         }),({
1553                 sum = csum_and_memcpy(v.iov_base,
1554                                      (from += v.iov_len) - v.iov_len,
1555                                      v.iov_len, sum, off);
1556                 off += v.iov_len;
1557         })
1558         )
1559         *csum = sum;
1560         return bytes;
1561 }
1562 EXPORT_SYMBOL(csum_and_copy_to_iter);
1563
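/*
 * Copy @bytes from @addr into @i and feed whatever was actually copied
 * to the ahash request at @hashp.  Returns the number of bytes copied,
 * or 0 when the kernel is built without CONFIG_CRYPTO_HASH.
 */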
1564 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1565                 struct iov_iter *i)
1566 {
1567 #ifdef CONFIG_CRYPTO_HASH
1568         struct ahash_request *hash = hashp;
1569         struct scatterlist sg;
1570         size_t copied;
1571
1572         copied = copy_to_iter(addr, bytes, i);
1573         sg_init_one(&sg, addr, copied);
1574         ahash_request_set_crypt(hash, &sg, NULL, copied);
1575         crypto_ahash_update(hash);
1576         return copied;
1577 #else
1578         return 0;
1579 #endif
1580 }
1581 EXPORT_SYMBOL(hash_and_copy_to_iter);
1582
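/*
 * Report how many pages the first i->count bytes of @i touch, capped at
 * @maxpages.  For pipe iterators this counts the space still available
 * for writing rather than data already present.
 */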
1583 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1584 {
1585         size_t size = i->count;
1586         int npages = 0;
1587
1588         if (!size)
1589                 return 0;
1590         if (unlikely(iov_iter_is_discard(i)))
1591                 return 0;
1592
1593         if (unlikely(iov_iter_is_pipe(i))) {
1594                 struct pipe_inode_info *pipe = i->pipe;
1595                 unsigned int iter_head;
1596                 size_t off;
1597
1598                 if (!sanity(i))
1599                         return 0;
1600
1601                 data_start(i, &iter_head, &off);
1602                 /* Amount of free space: some of this one + all after this one */
1603                 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1604                 if (npages >= maxpages)
1605                         return maxpages;
1606         } else iterate_all_kinds(i, size, v, ({
1607                 unsigned long p = (unsigned long)v.iov_base;
1608                 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1609                         - p / PAGE_SIZE;
1610                 if (npages >= maxpages)
1611                         return maxpages;
1612         0;}),({
1613                 npages++;
1614                 if (npages >= maxpages)
1615                         return maxpages;
1616         }),({
1617                 unsigned long p = (unsigned long)v.iov_base;
1618                 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1619                         - p / PAGE_SIZE;
1620                 if (npages >= maxpages)
1621                         return maxpages;
1622         })
1623         )
1624         return npages;
1625 }
1626 EXPORT_SYMBOL(iov_iter_npages);
1627
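/*
 * Duplicate @old into @new, deep-copying the iovec/kvec/bvec array with
 * kmemdup(@flags).  Returns the new array (also installed in @new), or
 * NULL on allocation failure or when there is no array to copy (discard
 * iterators; pipes are unsupported and trigger a WARN).
 */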
1628 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1629 {
1630         *new = *old;
1631         if (unlikely(iov_iter_is_pipe(new))) {
1632                 WARN_ON(1);
1633                 return NULL;
1634         }
1635         if (unlikely(iov_iter_is_discard(new)))
1636                 return NULL;
1637         if (iov_iter_is_bvec(new))
1638                 return new->bvec = kmemdup(new->bvec,
1639                                     new->nr_segs * sizeof(struct bio_vec),
1640                                     flags);
1641         else
1642                 /* iovec and kvec have identical layout */
1643                 return new->iov = kmemdup(new->iov,
1644                                    new->nr_segs * sizeof(struct iovec),
1645                                    flags);
1646 }
1647 EXPORT_SYMBOL(dup_iter);
1648
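/*
 * Fetch @nr_segs compat (32-bit) iovecs from userspace and widen them
 * into the native struct iovec array at @iov.  A segment length with the
 * top bit set would go negative as compat_ssize_t and is rejected with
 * -EINVAL.
 */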
1649 static int copy_compat_iovec_from_user(struct iovec *iov,
1650                 const struct iovec __user *uvec, unsigned long nr_segs)
1651 {
1652         const struct compat_iovec __user *uiov =
1653                 (const struct compat_iovec __user *)uvec;
1654         int ret = -EFAULT, i;
1655
1656         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1657                 return -EFAULT;
1658
1659         for (i = 0; i < nr_segs; i++) {
1660                 compat_uptr_t buf;
1661                 compat_ssize_t len;
1662
1663                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1664                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1665
1666                 /* check for a compat_size_t value that does not fit in compat_ssize_t */
1667                 if (len < 0) {
1668                         ret = -EINVAL;
1669                         goto uaccess_end;
1670                 }
1671                 iov[i].iov_base = compat_ptr(buf);
1672                 iov[i].iov_len = len;
1673         }
1674
1675         ret = 0;
1676 uaccess_end:
1677         user_access_end();
1678         return ret;
1679 }
1680
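/*
 * Fetch @nr_segs native iovecs from userspace into @iov, rejecting any
 * segment whose length does not fit in ssize_t.
 */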
1681 static int copy_iovec_from_user(struct iovec *iov,
1682                 const struct iovec __user *uvec, unsigned long nr_segs)
1683 {
1684         unsigned long seg;
1685
1686         if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1687                 return -EFAULT;
1688         for (seg = 0; seg < nr_segs; seg++) {
1689                 if ((ssize_t)iov[seg].iov_len < 0)
1690                         return -EINVAL;
1691         }
1692
1693         return 0;
1694 }
1695
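/*
 * Copy an iovec array from userspace (compat or native layout) into
 * kernel memory.  The caller-provided @fast_iov is used when it has room
 * for all @nr_segs entries; otherwise a fresh array is allocated, which
 * the caller must kfree() when done.  Returns the array or an ERR_PTR().
 */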
1696 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1697                 unsigned long nr_segs, unsigned long fast_segs,
1698                 struct iovec *fast_iov, bool compat)
1699 {
1700         struct iovec *iov = fast_iov;
1701         int ret;
1702
1703         /*
1704          * SuS says "The readv() function *may* fail if the iovcnt argument was
1705          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1706          * traditionally returned zero for zero segments, so we do the same here.
1707          */
1708         if (nr_segs == 0)
1709                 return iov;
1710         if (nr_segs > UIO_MAXIOV)
1711                 return ERR_PTR(-EINVAL);
1712         if (nr_segs > fast_segs) {
1713                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1714                 if (!iov)
1715                         return ERR_PTR(-ENOMEM);
1716         }
1717
1718         if (compat)
1719                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1720         else
1721                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1722         if (ret) {
1723                 if (iov != fast_iov)
1724                         kfree(iov);
1725                 return ERR_PTR(ret);
1726         }
1727
1728         return iov;
1729 }
1730
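/*
 * Common implementation behind import_iovec(): pull in the iovec array,
 * check each base address with access_ok(), clamp the total length to
 * MAX_RW_COUNT, and initialize @i over the result.  See the kerneldoc of
 * import_iovec() below for the *@iovp ownership rules.
 */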
1731 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1732                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1733                  struct iov_iter *i, bool compat)
1734 {
1735         ssize_t total_len = 0;
1736         unsigned long seg;
1737         struct iovec *iov;
1738
1739         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1740         if (IS_ERR(iov)) {
1741                 *iovp = NULL;
1742                 return PTR_ERR(iov);
1743         }
1744
1745         /*
1746          * According to the Single Unix Specification we should return EINVAL if
1747          * an element length is < 0 when cast to ssize_t or if the total length
1748          * would overflow the ssize_t return value of the system call.
1749          *
1750          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1751          * overflow case.
1752          */
1753         for (seg = 0; seg < nr_segs; seg++) {
1754                 ssize_t len = (ssize_t)iov[seg].iov_len;
1755
1756                 if (!access_ok(iov[seg].iov_base, len)) {
1757                         if (iov != *iovp)
1758                                 kfree(iov);
1759                         *iovp = NULL;
1760                         return -EFAULT;
1761                 }
1762
1763                 if (len > MAX_RW_COUNT - total_len) {
1764                         len = MAX_RW_COUNT - total_len;
1765                         iov[seg].iov_len = len;
1766                 }
1767                 total_len += len;
1768         }
1769
1770         iov_iter_init(i, type, iov, nr_segs, total_len);
1771         if (iov == *iovp)
1772                 *iovp = NULL;
1773         else
1774                 *iovp = iov;
1775         return total_len;
1776 }
1777
1778 /**
1779  * import_iovec() - Copy an array of &struct iovec from userspace
1780  *     into the kernel, check that it is valid, and initialize a new
1781  *     &struct iov_iter iterator to access it.
1782  *
1783  * @type: One of %READ or %WRITE.
1784  * @uvec: Pointer to the userspace array.
1785  * @nr_segs: Number of elements in userspace array.
1786  * @fast_segs: Number of elements in *@iovp.
1787  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1788  *     on-stack) kernel array.
1789  * @i: Pointer to iterator that will be initialized on success.
1790  *
1791  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1792  * then this function places %NULL in *@iovp on return. Otherwise, a new
1793  * array will be allocated and the result placed in *@iovp. This means that
1794  * the caller may call kfree() on *@iovp regardless of whether the small
1795  * on-stack array was used or not (and regardless of whether this function
1796  * returns an error or not).
1797  *
1798  * Return: Negative error code on error, bytes imported on success.
1799  */
1800 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1801                  unsigned nr_segs, unsigned fast_segs,
1802                  struct iovec **iovp, struct iov_iter *i)
1803 {
1804         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1805                               in_compat_syscall());
1806 }
1807 EXPORT_SYMBOL(import_iovec);
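/*
 * A minimal usage sketch for import_iovec(); the variable names are
 * illustrative only and mirror how read/write syscall paths typically
 * drive it:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... consume &iter ...
 *	kfree(iov);	(safe: iov is NULL when the stack array was used)
 */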
1808
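/*
 * Single-buffer counterpart of import_iovec(): validate @buf/@len with
 * access_ok(), record them in the caller-provided @iov, and initialize
 * @i over that one segment.  The length is silently clamped to
 * MAX_RW_COUNT.
 */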
1809 int import_single_range(int rw, void __user *buf, size_t len,
1810                  struct iovec *iov, struct iov_iter *i)
1811 {
1812         if (len > MAX_RW_COUNT)
1813                 len = MAX_RW_COUNT;
1814         if (unlikely(!access_ok(buf, len)))
1815                 return -EFAULT;
1816
1817         iov->iov_base = buf;
1818         iov->iov_len = len;
1819         iov_iter_init(i, rw, iov, 1, len);
1820         return 0;
1821 }
1822 EXPORT_SYMBOL(import_single_range);
1823
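/*
 * Walk the first @bytes of @i, calling @f on each segment; bvec pages
 * are kmap()ed around the call.  Only bvec and kvec iterators are
 * usable (the iovec case evaluates to -EINVAL), the iterator is not
 * advanced, and the result of the last call to @f is returned.  A sketch
 * of a conforming callback (names are illustrative):
 *
 *	static int sum_lengths(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 */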
1824 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
1825                             int (*f)(struct kvec *vec, void *context),
1826                             void *context)
1827 {
1828         struct kvec w;
1829         int err = -EINVAL;
1830         if (!bytes)
1831                 return 0;
1832
1833         iterate_all_kinds(i, bytes, v, -EINVAL, ({
1834                 w.iov_base = kmap(v.bv_page) + v.bv_offset;
1835                 w.iov_len = v.bv_len;
1836                 err = f(&w, context);
1837                 kunmap(v.bv_page);
1838                 err;}), ({
1839                 w = v;
1840                 err = f(&w, context);})
1841         )
1842         return err;
1843 }
1844 EXPORT_SYMBOL(iov_iter_for_each_range);