/* lib/iov_iter.c */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
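
/*
 * Illustrative sketch (not part of the kernel source): a user of these
 * macros supplies one STEP expression per iterator flavour, and the
 * expansion walks whichever segment array the iterator carries.
 * _copy_to_iter() below, for instance, is essentially:
 *
 *	iterate_and_advance(i, bytes, v,
 *		copyout(v.iov_base, from, v.iov_len),	   user iovec
 *		memcpy_to_page(v.bv_page, v.bv_offset,
 *			       from, v.bv_len),		   bvec page
 *		memcpy(v.iov_base, from, v.iov_len)	   kernel kvec
 *	)
 *
 * where an iovec STEP returns the number of bytes NOT copied, while
 * kvec and bvec STEPs cannot fail.
 */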

static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;
		}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
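
/*
 * Example of the usual calling pattern (a sketch after write paths such
 * as generic_perform_write(), not part of this file): fault the pages
 * in up front, before entering a section that cannot service a fault:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 */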

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
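
/*
 * Example (illustrative sketch only): initialising an iterator over a
 * single user buffer as the destination of a read-style transfer;
 * "buf" and "len" are hypothetical caller-supplied values:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */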

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
				__wsum *csum, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, r;
	size_t off = 0;
	__wsum sum = *csum;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	*csum = sum;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
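
/*
 * Example (sketch): most callers go through copy_to_iter() from
 * <linux/uio.h>, which wraps _copy_to_iter() with a hardened-usercopy
 * size check; "st" here is a hypothetical kernel structure:
 *
 *	if (copy_to_iter(&st, sizeof(st), iter) != sizeof(st))
 *		return -EFAULT;
 */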

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
					    off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
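
/*
 * Example (sketch): unlike _copy_to_iter(), a short return here can
 * mean the *source* was poisoned, so callers compare the return value
 * against the requested length; the error code below is hypothetical:
 *
 *	copied = _copy_to_iter_mcsafe(kaddr, len, iter);
 *	if (copied != len)
 *		return -EIO;
 */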

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
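
/*
 * Example (sketch): the "full" variant is all-or-nothing, which suits
 * header parsing in networking code (see copy_from_iter_full() in
 * <linux/uio.h>); on failure nothing is consumed from the iterator:
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */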

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. The _copy_from_iter_nocache() variant only
 * attempts to bypass the cache for the ITER_IOVEC case, and on some
 * archs may use instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs the page order to compute the page size.
	 * However, we mostly deal with order-0 pages, and can thus avoid
	 * a possible cache line miss for requests that fit within the
	 * first PAGE_SIZE bytes regardless of page order.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
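
/*
 * Example (a sketch of the buffered-read pattern, cf.
 * generic_file_buffered_read()): copy part of a page-cache page to the
 * iterator and treat a short copy as a fault:
 *
 *	ret = copy_page_to_iter(page, offset, nr, iter);
 *	offset += ret;
 *	if (ret < nr) {
 *		error = -EFAULT;
 *		goto out;
 *	}
 */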

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
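
/*
 * Example (illustrative sketch): callers that discover an error only
 * after consuming the iterator use iov_iter_revert() to restore it so
 * the operation can be retried or aborted cleanly; "copied" is a
 * hypothetical local:
 *
 *	copied = copy_from_iter(buf, len, iter);
 *	if (something_went_wrong)
 *		iov_iter_revert(iter, copied);
 */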

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
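
/*
 * Example (sketch): draining bytes a caller wants thrown away; with a
 * discard iterator, copy_to_iter() "succeeds" without storing anything,
 * and only i->count is consumed:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	copy_to_iter(src, count, &iter);
 */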

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
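
/*
 * Example (a sketch after the direct-I/O pattern): pin the user pages
 * backing the iterator, then advance it by however much was mapped;
 * note that iov_iter_get_pages() itself does not advance the iterator.
 * "pages" and its capacity "maxpages" are hypothetical:
 *
 *	got = iov_iter_get_pages(iter, pages, LONG_MAX, maxpages, &from);
 *	if (got < 0)
 *		return got;
 *	iov_iter_advance(iter, got);
 */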
1351
1352 static struct page **get_pages_array(size_t n)
1353 {
1354         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1355 }
1356
1357 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1358                    struct page ***pages, size_t maxsize,
1359                    size_t *start)
1360 {
1361         struct page **p;
1362         unsigned int iter_head, npages;
1363         ssize_t n;
1364
1365         if (!maxsize)
1366                 return 0;
1367
1368         if (!sanity(i))
1369                 return -EFAULT;
1370
1371         data_start(i, &iter_head, start);
1372         /* Amount of free space: some of this one + all after this one */
1373         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1374         n = npages * PAGE_SIZE - *start;
1375         if (maxsize > n)
1376                 maxsize = n;
1377         else
1378                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1379         p = get_pages_array(npages);
1380         if (!p)
1381                 return -ENOMEM;
1382         n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1383         if (n > 0)
1384                 *pages = p;
1385         else
1386                 kvfree(p);
1387         return n;
1388 }
1389
1390 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1391                    struct page ***pages, size_t maxsize,
1392                    size_t *start)
1393 {
1394         struct page **p;
1395
1396         if (maxsize > i->count)
1397                 maxsize = i->count;
1398
1399         if (unlikely(iov_iter_is_pipe(i)))
1400                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1401         if (unlikely(iov_iter_is_discard(i)))
1402                 return -EFAULT;
1403
1404         iterate_all_kinds(i, maxsize, v, ({
1405                 unsigned long addr = (unsigned long)v.iov_base;
1406                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1407                 int n;
1408                 int res;
1409
1410                 addr &= ~(PAGE_SIZE - 1);
1411                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1412                 p = get_pages_array(n);
1413                 if (!p)
1414                         return -ENOMEM;
1415                 res = get_user_pages_fast(addr, n,
1416                                 iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
1417                 if (unlikely(res < 0)) {
1418                         kvfree(p);
1419                         return res;
1420                 }
1421                 *pages = p;
1422                 return (res == n ? len : res * PAGE_SIZE) - *start;
1423         0;}),({
1424                 /* can't be more than PAGE_SIZE */
1425                 *start = v.bv_offset;
1426                 *pages = p = get_pages_array(1);
1427                 if (!p)
1428                         return -ENOMEM;
1429                 get_page(*p = v.bv_page);
1430                 return v.bv_len;
1431         }),({
1432                 return -EFAULT;
1433         })
1434         )
1435         return 0;
1436 }
1437 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1438
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

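/*
 * All-or-nothing variant of csum_and_copy_from_iter(): if fewer than
 * @bytes remain in the iterator, or a user-space fault occurs mid-copy,
 * it returns false without updating *csum or advancing the iterator
 * (though the destination buffer may already be partially written).
 */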
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

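/*
 * Mirror image of csum_and_copy_from_iter(): copy @bytes out of @addr and
 * into the iterator, checksumming what was copied.  Pipe-backed
 * destinations are diverted to csum_and_copy_to_pipe_iter(), which
 * appends to the pipe buffers; discard iterators WARN and return 0.
 */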
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

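/*
 * Like copy_to_iter(), but also feeds the copied bytes into the ahash
 * request passed via @hashp, so a digest can be accumulated in the same
 * pass as the copy.  Returns the number of bytes copied (0 when the
 * kernel is built without the crypto hash API).
 */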
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

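/*
 * Estimate how many pages the first i->count bytes of the iterator touch,
 * capped at @maxpages.  For a pipe it counts the writable space instead:
 * the remainder of the current buffer plus everything after it.
 */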
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

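/*
 * Clone an iterator together with a private copy of its segment array, so
 * the copy stays usable after the original array is freed.  Returns the
 * duplicated array (the caller eventually kfree()s it) or NULL on
 * allocation failure; pipe and discard iterators cannot be duplicated.
 */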
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success.
 */
ssize_t import_iovec(int type, const struct iovec __user *uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return n;
}
EXPORT_SYMBOL(import_iovec);

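/*
 * A sketch of the usual import_iovec() calling pattern (the handler name
 * is hypothetical; see the readv/writev paths in fs/read_write.c for real
 * callers):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);		(hypothetical consumer)
 *	kfree(iov);	(safe either way: iov is NULL when iovstack was used)
 *	return ret;
 */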
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

ssize_t compat_import_iovec(int type,
		const struct compat_iovec __user *uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return n;
}
EXPORT_SYMBOL(compat_import_iovec);
#endif

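/*
 * Single-segment counterpart of import_iovec(): validate one user-space
 * range, fill in the caller-supplied iovec and point the iterator at it.
 * Oversized requests are quietly clamped to MAX_RW_COUNT rather than
 * rejected.
 */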
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

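/*
 * Call @f on each segment of the iterator, stopping early when @f returns
 * non-zero or once @bytes have been walked.  bvec pages are kmap()ed
 * around the callback; iovec-backed (user-space) iterators are not
 * supported and produce -EINVAL.  The iterator itself is not advanced.
 */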
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);