// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_xarray(i, n, __v, skip, STEP) {         \
        struct page *head = NULL;                               \
        size_t wanted = n, seg, offset;                         \
        loff_t start = i->xarray_start + skip;                  \
        pgoff_t index = start >> PAGE_SHIFT;                    \
        int j;                                                  \
                                                                \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        rcu_read_lock();                                                \
        xas_for_each(&xas, head, ULONG_MAX) {                           \
                if (xas_retry(&xas, head))                              \
                        continue;                                       \
                if (WARN_ON(xa_is_value(head)))                         \
                        break;                                          \
                if (WARN_ON(PageHuge(head)))                            \
                        break;                                          \
                for (j = (head->index < index) ? index - head->index : 0; \
                     j < thp_nr_pages(head); j++) {                     \
                        __v.bv_page = head + j;                         \
                        offset = (i->xarray_start + skip) & ~PAGE_MASK; \
                        seg = PAGE_SIZE - offset;                       \
                        __v.bv_offset = offset;                         \
                        __v.bv_len = min(n, seg);                       \
                        (void)(STEP);                                   \
                        n -= __v.bv_len;                                \
                        skip += __v.bv_len;                             \
                        if (n == 0)                                     \
                                break;                                  \
                }                                                       \
                if (n == 0)                                             \
                        break;                                          \
        }                                                       \
        rcu_read_unlock();                                      \
        n = wanted - n;                                         \
}

#define iterate_all_kinds(i, n, v, I, B, K, X) {                \
        if (likely(n)) {                                        \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                } else if (unlikely(i->type & ITER_XARRAY)) {   \
                        struct bio_vec v;                       \
                        iterate_xarray(i, n, v, skip, (X));     \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                }                                               \
        }                                                       \
}

#define iterate_and_advance(i, n, v, I, B, K, X) {              \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                        skip += n;                              \
                } else if (unlikely(i->type & ITER_XARRAY)) {   \
                        struct bio_vec v;                       \
                        iterate_xarray(i, n, v, skip, (X))      \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}
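
/*
 * Usage sketch (illustrative only, not part of this file): callers hand
 * iterate_and_advance() one STEP expression per iterator flavour, in the
 * order iovec (I), bvec (B), kvec (K), xarray (X), with 'v' naming the
 * current segment inside each STEP.  A hypothetical helper that XORs all
 * bytes of a kernel-backed iterator might look like:
 *
 *        static u8 xor_iter(size_t bytes, struct iov_iter *i)
 *        {
 *                u8 x = 0;
 *                size_t n;
 *
 *                iterate_and_advance(i, bytes, v,
 *                        0,
 *                        ({ char *p = kmap_atomic(v.bv_page);
 *                           for (n = 0; n < v.bv_len; n++)
 *                                x ^= p[v.bv_offset + n];
 *                           kunmap_atomic(p); 0; }),
 *                        ({ for (n = 0; n < v.iov_len; n++)
 *                                x ^= ((u8 *)v.iov_base)[n];
 *                           0; }),
 *                        ({ char *p = kmap_atomic(v.bv_page);
 *                           for (n = 0; n < v.bv_len; n++)
 *                                x ^= p[v.bv_offset + n];
 *                           kunmap_atomic(p); 0; })
 *                )
 *                return x;
 *        }
 *
 * The iovec STEP must return how many bytes it failed to process (the 0
 * above would claim success without touching user memory, so real users
 * call copyout()/copyin() there); the other STEPs are evaluated purely
 * for their side effects.
 */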

static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user(to, from, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[i_head & p_mask];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head = i->head;
        size_t off;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        buf = &pipe->bufs[i_head & p_mask];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                i_head++;
                buf = &pipe->bufs[i_head & p_mask];
        }
        if (pipe_full(i_head, p_tail, pipe->max_usage))
                return 0;

        buf->ops = &page_cache_pipe_buf_ops;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = bytes;

        pipe->head = i_head + 1;
        i->iov_offset = offset + bytes;
        i->head = i_head;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
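
/*
 * Typical caller pattern (sketch): buffered-write paths fault the user
 * pages in ahead of a page-atomic copy, since the copy itself must not
 * sleep:
 *
 *        if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *                status = -EFAULT;
 *                break;
 *        }
 *
 * See the fuller write-loop sketch next to
 * iov_iter_copy_from_user_atomic() below.
 */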

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        direction &= READ | WRITE;

        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
                i->type = ITER_KVEC | direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = ITER_IOVEC | direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
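
/*
 * Example (sketch): a read(2)-style path wrapping a single user buffer
 * in an ITER_IOVEC iterator.  'ubuf', 'len' and 'kbuf' are hypothetical
 * locals; most real callers arrive here via import_single_range() or
 * import_iovec() instead:
 *
 *        struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *        struct iov_iter iter;
 *
 *        iov_iter_init(&iter, READ, &iov, 1, len);
 *        copied = copy_to_iter(kbuf, len, &iter);
 */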

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
                              unsigned int *iter_headp, size_t *offp)
{
        unsigned int p_mask = i->pipe->ring_size - 1;
        unsigned int iter_head = i->head;
        size_t off = i->iov_offset;

        if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
                    off == PAGE_SIZE)) {
                iter_head++;
                off = 0;
        }
        *iter_headp = iter_head;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *iter_headp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int iter_head;
        size_t off;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &iter_head, &off);
        *iter_headp = iter_head;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[iter_head & p_mask].len += size;
                        return size;
                }
                pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
                iter_head++;
        }
        while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
                struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;

                buf->ops = &default_pipe_buf_ops;
                buf->page = page;
                buf->offset = 0;
                buf->len = min_t(ssize_t, left, PAGE_SIZE);
                left -= buf->len;
                iter_head++;
                pipe->head = iter_head;

                if (left == 0)
                        return size;
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct csum_state *csstate,
                                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        __wsum sum = csstate->csum;
        size_t off = csstate->off;
        unsigned int i_head;
        size_t n, r;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &r);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
                char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
                sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
                kunmap_atomic(p);
                i->head = i_head;
                i->iov_offset = r + chunk;
                n -= chunk;
                off += chunk;
                addr += chunk;
                r = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        csstate->csum = sum;
        csstate->off = off;
        return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
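
/*
 * Example (sketch): callers normally use the copy_to_iter() wrapper from
 * <linux/uio.h>, which size-checks the source before calling
 * _copy_to_iter().  A hypothetical ->read_iter() handler:
 *
 *        static ssize_t dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *        {
 *                size_t n = min(iov_iter_count(to), sizeof(dev_buf));
 *
 *                return copy_to_iter(dev_buf, n, to);
 *        }
 *
 * A return value short of n means part of the destination was not
 * writable.
 */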

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
{
        unsigned long ret;
        char *to;

        to = kmap_atomic(page);
        ret = copy_mc_to_kernel(to + offset, from, len);
        kunmap_atomic(to);

        return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off, xfer = 0;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                unsigned long rem;

                rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
                                            off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk - rem;
                xfer += chunk - rem;
                if (rem)
                        break;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and a typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal, so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter(), where only ITER_IOVEC attempts might
 *   return a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
                           v.iov_len),
                ({
                rem = copy_mc_to_page(v.bv_page, v.bv_offset,
                                      (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                }),
                ({
                rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
                                        - v.iov_len, v.iov_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                }),
                ({
                rem = copy_mc_to_page(v.bv_page, v.bv_offset,
                                      (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        rcu_read_unlock();
                        i->iov_offset += bytes;
                        i->count -= bytes;
                        return bytes;
                }
                })
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
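
/*
 * Example (sketch): the write-side mirror of the read example above,
 * via the copy_from_iter() wrapper (names hypothetical):
 *
 *        static ssize_t dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *        {
 *                size_t n = min(iov_iter_count(from), sizeof(dev_buf));
 *
 *                if (copy_from_iter(dev_buf, n, from) != n)
 *                        return -EFAULT;
 *                return n;
 *        }
 */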

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        if (iter_is_iovec(i))
                might_fault();
        iterate_all_kinds(i, bytes, v, ({
                if (copyin((to += v.iov_len) - v.iov_len,
                                      v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
                        v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (unlikely(iov_iter_is_discard(i)))
                return bytes;
        else if (likely(!iov_iter_is_pipe(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;

        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
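
/*
 * Example (sketch): read paths use iov_iter_zero() to fill holes, e.g.
 * when part of the requested range of a sparse file has no backing data
 * ('hole_len' is a hypothetical local):
 *
 *        if (iov_iter_zero(hole_len, iter) != hole_len)
 *                return -EFAULT;
 *
 * Like the copy helpers, it returns the number of bytes actually zeroed.
 */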

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
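
/*
 * Example (sketch): the classic generic_perform_write()-style loop pairs
 * this helper with iov_iter_fault_in_readable() above, because the
 * page-atomic copy itself must not fault:
 *
 *        while (iov_iter_count(i)) {
 *                size_t bytes = min_t(size_t, iov_iter_count(i),
 *                                     PAGE_SIZE - offset);
 *
 *                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *                        status = -EFAULT;
 *                        break;
 *                }
 *                ... find and lock the page ...
 *                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *                ... commit the write, unlock ...
 *                iov_iter_advance(i, copied);
 *        }
 */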

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_head = pipe->head;
        unsigned int p_mask = pipe->ring_size - 1;

        if (!pipe_empty(p_head, p_tail)) {
                struct pipe_buffer *buf;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;

                if (off) {
                        buf = &pipe->bufs[i_head & p_mask];
                        buf->len = off - buf->offset;
                        i_head++;
                }
                while (p_head != i_head) {
                        p_head--;
                        pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
                }

                pipe->head = p_head;
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset, left = size;

                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[i_head & p_mask].offset;
                while (1) {
                        buf = &pipe->bufs[i_head & p_mask];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        i_head++;
                }
                i->head = i_head;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        struct bvec_iter bi;

        bi.bi_size = i->count;
        bi.bi_bvec_done = i->iov_offset;
        bi.bi_idx = 0;
        bvec_iter_advance(i->bvec, &bi, size);

        i->bvec += bi.bi_idx;
        i->nr_segs -= bi.bi_idx;
        i->count = bi.bi_size;
        i->iov_offset = bi.bi_bvec_done;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                pipe_advance(i, size);
                return;
        }
        if (unlikely(iov_iter_is_discard(i))) {
                i->count -= size;
                return;
        }
        if (unlikely(iov_iter_is_xarray(i))) {
                size = min(size, i->count);
                i->iov_offset += size;
                i->count -= size;
                return;
        }
        if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;
                while (1) {
                        struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
                        size_t n = off - b->offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && i_head == i->start_head) {
                                off = 0;
                                break;
                        }
                        i_head--;
                        b = &pipe->bufs[i_head & p_mask];
                        off = b->offset + b->len;
                }
                i->iov_offset = off;
                i->head = i_head;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
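
/*
 * Example (sketch): advance and revert bracket speculative consumption.
 * A caller that hands the iterator to a helper which may partially fail
 * can rewind by exactly the amount that was consumed ('some_helper' is
 * hypothetical):
 *
 *        size_t before = iov_iter_count(i);
 *
 *        err = some_helper(i);
 *        if (err)
 *                iov_iter_revert(i, before - iov_iter_count(i));
 */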

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
                return i->count;
        if (iov_iter_is_bvec(i))
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_KVEC | (direction & (READ | WRITE));
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
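
/*
 * Example (sketch): wrapping a kernel buffer so it can be handed to code
 * that consumes iov_iters, e.g. receiving into 'kbuf' through a socket's
 * msg_iter (names hypothetical):
 *
 *        struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *        struct iov_iter iter;
 *
 *        iov_iter_kvec(&iter, READ, &kv, 1, len);
 */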

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_BVEC | (direction & (READ | WRITE));
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
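
/*
 * Example (sketch): describing a fragment of a page, as block and
 * network code do when shuttling page cache pages around (names
 * hypothetical):
 *
 *        struct bio_vec bv = {
 *                .bv_page = page, .bv_len = len, .bv_offset = off,
 *        };
 *        struct iov_iter iter;
 *
 *        iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 */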

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        i->type = ITER_PIPE | READ;
        i->pipe = pipe;
        i->head = pipe->head;
        i->iov_offset = 0;
        i->count = count;
        i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        i->type = ITER_XARRAY | (direction & (READ | WRITE));
        i->xarray = xarray;
        i->xarray_start = start;
        i->count = count;
        i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        i->type = ITER_DISCARD | READ;
        i->count = count;
        i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
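
/*
 * Example (sketch): draining bytes nobody wants to keep, e.g. skipping
 * over part of a stream ('some_read_helper' is hypothetical):
 *
 *        struct iov_iter iter;
 *
 *        iov_iter_discard(&iter, READ, count);
 *        ret = some_read_helper(file, &iter);
 *
 * Copies into a discard iterator report success and advance it, but the
 * data goes nowhere.
 */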

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i))) {
                unsigned int p_mask = i->pipe->ring_size - 1;

                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
                        return size | i->iov_offset;
                return size;
        }
        if (unlikely(iov_iter_is_xarray(i)))
                return (i->xarray_start + i->iov_offset) | i->count;
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len,
                res |= v.bv_offset | v.bv_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
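
/*
 * Example (sketch): direct I/O paths use the OR-of-everything result to
 * reject buffers that don't meet the device's alignment requirements
 * ('blocksize_mask' is hypothetical):
 *
 *        if (iov_iter_alignment(iter) & blocksize_mask)
 *                return -EINVAL;
 *
 * Since segment addresses and lengths are OR'ed together, any misaligned
 * base or length shows up in the result.
 */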
1357
1358 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1359 {
1360         unsigned long res = 0;
1361         size_t size = i->count;
1362
1363         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1364                 WARN_ON(1);
1365                 return ~0U;
1366         }
1367
1368         iterate_all_kinds(i, size, v,
1369                 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1370                         (size != v.iov_len ? size : 0), 0),
1371                 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1372                         (size != v.bv_len ? size : 0)),
1373                 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1374                         (size != v.iov_len ? size : 0)),
1375                 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1376                         (size != v.bv_len ? size : 0))
1377                 );
1378         return res;
1379 }
1380 EXPORT_SYMBOL(iov_iter_gap_alignment);
1381
1382 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1383                                 size_t maxsize,
1384                                 struct page **pages,
1385                                 int iter_head,
1386                                 size_t *start)
1387 {
1388         struct pipe_inode_info *pipe = i->pipe;
1389         unsigned int p_mask = pipe->ring_size - 1;
1390         ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1391         if (!n)
1392                 return -EFAULT;
1393
1394         maxsize = n;
1395         n += *start;
1396         while (n > 0) {
1397                 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1398                 iter_head++;
1399                 n -= PAGE_SIZE;
1400         }
1401
1402         return maxsize;
1403 }
1404
1405 static ssize_t pipe_get_pages(struct iov_iter *i,
1406                    struct page **pages, size_t maxsize, unsigned maxpages,
1407                    size_t *start)
1408 {
1409         unsigned int iter_head, npages;
1410         size_t capacity;
1411
1412         if (!maxsize)
1413                 return 0;
1414
1415         if (!sanity(i))
1416                 return -EFAULT;
1417
1418         data_start(i, &iter_head, start);
1419         /* Amount of free space: some of this one + all after this one */
1420         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1421         capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1422
1423         return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1424 }
1425
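/*
 * Walk the xarray under the RCU read lock, taking a reference on up to
 * @nr_pages pages starting at @index; returns how many were grabbed.
 */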
1426 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1427                                           pgoff_t index, unsigned int nr_pages)
1428 {
1429         XA_STATE(xas, xa, index);
1430         struct page *page;
1431         unsigned int ret = 0;
1432
1433         rcu_read_lock();
1434         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1435                 if (xas_retry(&xas, page))
1436                         continue;
1437
1438                 /* Has the page moved or been split? */
1439                 if (unlikely(page != xas_reload(&xas))) {
1440                         xas_reset(&xas);
1441                         continue;
1442                 }
1443
1444                 pages[ret] = find_subpage(page, xas.xa_index);
1445                 get_page(pages[ret]);
1446                 if (++ret == nr_pages)
1447                         break;
1448         }
1449         rcu_read_unlock();
1450         return ret;
1451 }
1452
1453 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1454                                      struct page **pages, size_t maxsize,
1455                                      unsigned maxpages, size_t *_start_offset)
1456 {
1457         unsigned nr, offset;
1458         pgoff_t index, count;
1459         size_t size = maxsize, actual;
1460         loff_t pos;
1461
1462         if (!size || !maxpages)
1463                 return 0;
1464
1465         pos = i->xarray_start + i->iov_offset;
1466         index = pos >> PAGE_SHIFT;
1467         offset = pos & ~PAGE_MASK;
1468         *_start_offset = offset;
1469
1470         count = 1;
1471         if (size > PAGE_SIZE - offset) {
1472                 size -= PAGE_SIZE - offset;
1473                 count += size >> PAGE_SHIFT;
1474                 size &= ~PAGE_MASK;
1475                 if (size)
1476                         count++;
1477         }
1478
1479         if (count > maxpages)
1480                 count = maxpages;
1481
1482         nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1483         if (nr == 0)
1484                 return 0;
1485
1486         actual = PAGE_SIZE * nr;
1487         actual -= offset;
1488         if (nr == count && size > 0) {
1489                 unsigned last_offset = (nr > 1) ? 0 : offset;
1490                 actual -= PAGE_SIZE - (last_offset + size);
1491         }
1492         return actual;
1493 }
1494
1495 ssize_t iov_iter_get_pages(struct iov_iter *i,
1496                    struct page **pages, size_t maxsize, unsigned maxpages,
1497                    size_t *start)
1498 {
1499         if (maxsize > i->count)
1500                 maxsize = i->count;
1501
1502         if (unlikely(iov_iter_is_pipe(i)))
1503                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1504         if (unlikely(iov_iter_is_xarray(i)))
1505                 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1506         if (unlikely(iov_iter_is_discard(i)))
1507                 return -EFAULT;
1508
1509         iterate_all_kinds(i, maxsize, v, ({
1510                 unsigned long addr = (unsigned long)v.iov_base;
1511                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1512                 int n;
1513                 int res;
1514
1515                 if (len > maxpages * PAGE_SIZE)
1516                         len = maxpages * PAGE_SIZE;
1517                 addr &= ~(PAGE_SIZE - 1);
1518                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1519                 res = get_user_pages_fast(addr, n,
1520                                 iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
1521                                 pages);
1522                 if (unlikely(res < 0))
1523                         return res;
1524                 return (res == n ? len : res * PAGE_SIZE) - *start;
1525         0;}),({
1526                 /* can't be more than PAGE_SIZE */
1527                 *start = v.bv_offset;
1528                 get_page(*pages = v.bv_page);
1529                 return v.bv_len;
1530         }),({
1531                 return -EFAULT;
1532         }),
1533         0
1534         )
1535         return 0;
1536 }
1537 EXPORT_SYMBOL(iov_iter_get_pages);
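/*
 * Example (sketch): pinning up to 16 pages from an iterator; on success
 * the data begins @start bytes into the first page and each page holds
 * a reference that must be dropped with put_page():
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t bytes;
 *	int n, k;
 *
 *	bytes = iov_iter_get_pages(iter, pages, SIZE_MAX, 16, &start);
 *	if (bytes < 0)
 *		return bytes;
 *	n = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
 *	for (k = 0; k < n; k++)
 *		put_page(pages[k]);
 */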
1538
1539 static struct page **get_pages_array(size_t n)
1540 {
1541         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1542 }
1543
1544 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1545                    struct page ***pages, size_t maxsize,
1546                    size_t *start)
1547 {
1548         struct page **p;
1549         unsigned int iter_head, npages;
1550         ssize_t n;
1551
1552         if (!maxsize)
1553                 return 0;
1554
1555         if (!sanity(i))
1556                 return -EFAULT;
1557
1558         data_start(i, &iter_head, start);
1559         /* Amount of free space: some of this one + all after this one */
1560         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1561         n = npages * PAGE_SIZE - *start;
1562         if (maxsize > n)
1563                 maxsize = n;
1564         else
1565                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1566         p = get_pages_array(npages);
1567         if (!p)
1568                 return -ENOMEM;
1569         n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1570         if (n > 0)
1571                 *pages = p;
1572         else
1573                 kvfree(p);
1574         return n;
1575 }
1576
1577 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1578                                            struct page ***pages, size_t maxsize,
1579                                            size_t *_start_offset)
1580 {
1581         struct page **p;
1582         unsigned nr, offset;
1583         pgoff_t index, count;
1584         size_t size = maxsize, actual;
1585         loff_t pos;
1586
1587         if (!size)
1588                 return 0;
1589
1590         pos = i->xarray_start + i->iov_offset;
1591         index = pos >> PAGE_SHIFT;
1592         offset = pos & ~PAGE_MASK;
1593         *_start_offset = offset;
1594
1595         count = 1;
1596         if (size > PAGE_SIZE - offset) {
1597                 size -= PAGE_SIZE - offset;
1598                 count += size >> PAGE_SHIFT;
1599                 size &= ~PAGE_MASK;
1600                 if (size)
1601                         count++;
1602         }
1603
1604         p = get_pages_array(count);
1605         if (!p)
1606                 return -ENOMEM;
1607         *pages = p;
1608
1609         nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1610         if (nr == 0)
1611                 return 0;
1612
1613         actual = PAGE_SIZE * nr;
1614         actual -= offset;
1615         if (nr == count && size > 0) {
1616                 unsigned last_offset = (nr > 1) ? 0 : offset;
1617                 actual -= PAGE_SIZE - (last_offset + size);
1618         }
1619         return actual;
1620 }
1621
1622 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1623                    struct page ***pages, size_t maxsize,
1624                    size_t *start)
1625 {
1626         struct page **p;
1627
1628         if (maxsize > i->count)
1629                 maxsize = i->count;
1630
1631         if (unlikely(iov_iter_is_pipe(i)))
1632                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1633         if (unlikely(iov_iter_is_xarray(i)))
1634                 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1635         if (unlikely(iov_iter_is_discard(i)))
1636                 return -EFAULT;
1637
1638         iterate_all_kinds(i, maxsize, v, ({
1639                 unsigned long addr = (unsigned long)v.iov_base;
1640                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1641                 int n;
1642                 int res;
1643
1644                 addr &= ~(PAGE_SIZE - 1);
1645                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1646                 p = get_pages_array(n);
1647                 if (!p)
1648                         return -ENOMEM;
1649                 res = get_user_pages_fast(addr, n,
1650                                 iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
1651                 if (unlikely(res < 0)) {
1652                         kvfree(p);
1653                         return res;
1654                 }
1655                 *pages = p;
1656                 return (res == n ? len : res * PAGE_SIZE) - *start;
1657         0;}),({
1658                 /* can't be more than PAGE_SIZE */
1659                 *start = v.bv_offset;
1660                 *pages = p = get_pages_array(1);
1661                 if (!p)
1662                         return -ENOMEM;
1663                 get_page(*p = v.bv_page);
1664                 return v.bv_len;
1665         }),({
1666                 return -EFAULT;
1667         }), 0
1668         )
1669         return 0;
1670 }
1671 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
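/*
 * Example (sketch): as above, but the helper sizes the page array
 * itself.  After every entry has been put_page()d, the array must be
 * released with kvfree():
 *
 *	struct page **pages;
 *	size_t start;
 *	ssize_t bytes;
 *
 *	bytes = iov_iter_get_pages_alloc(iter, &pages, SIZE_MAX, &start);
 *	if (bytes > 0) {
 *		... consume bytes of data at offset start ...
 *		kvfree(pages);
 *	}
 */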
1672
1673 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1674                                struct iov_iter *i)
1675 {
1676         char *to = addr;
1677         __wsum sum, next;
1678         size_t off = 0;
1679         sum = *csum;
1680         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1681                 WARN_ON(1);
1682                 return 0;
1683         }
1684         iterate_and_advance(i, bytes, v, ({
1685                 next = csum_and_copy_from_user(v.iov_base,
1686                                                (to += v.iov_len) - v.iov_len,
1687                                                v.iov_len);
1688                 if (next) {
1689                         sum = csum_block_add(sum, next, off);
1690                         off += v.iov_len;
1691                 }
1692                 next ? 0 : v.iov_len;
1693         }), ({
1694                 char *p = kmap_atomic(v.bv_page);
1695                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1696                                       p + v.bv_offset, v.bv_len,
1697                                       sum, off);
1698                 kunmap_atomic(p);
1699                 off += v.bv_len;
1700         }),({
1701                 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1702                                       v.iov_base, v.iov_len,
1703                                       sum, off);
1704                 off += v.iov_len;
1705         }), ({
1706                 char *p = kmap_atomic(v.bv_page);
1707                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1708                                       p + v.bv_offset, v.bv_len,
1709                                       sum, off);
1710                 kunmap_atomic(p);
1711                 off += v.bv_len;
1712         })
1713         )
1714         *csum = sum;
1715         return bytes;
1716 }
1717 EXPORT_SYMBOL(csum_and_copy_from_iter);
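/*
 * Example (illustrative): a datagram receive path folding the copied
 * payload into a running Internet checksum; "buf", "len" and "iter" are
 * assumed:
 *
 *	__wsum csum = 0;
 *	size_t n = csum_and_copy_from_iter(buf, len, &csum, iter);
 *
 *	if (n != len)
 *		... a fault stopped the copy; csum covers only n bytes ...
 *
 * csum_and_copy_from_iter_full() below is the all-or-nothing variant:
 * it advances the iterator only if every byte could be copied.
 */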
1718
1719 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1720                                struct iov_iter *i)
1721 {
1722         char *to = addr;
1723         __wsum sum, next;
1724         size_t off = 0;
1725         sum = *csum;
1726         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1727                 WARN_ON(1);
1728                 return false;
1729         }
1730         if (unlikely(i->count < bytes))
1731                 return false;
1732         iterate_all_kinds(i, bytes, v, ({
1733                 next = csum_and_copy_from_user(v.iov_base,
1734                                                (to += v.iov_len) - v.iov_len,
1735                                                v.iov_len);
1736                 if (!next)
1737                         return false;
1738                 sum = csum_block_add(sum, next, off);
1739                 off += v.iov_len;
1740                 0;
1741         }), ({
1742                 char *p = kmap_atomic(v.bv_page);
1743                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1744                                       p + v.bv_offset, v.bv_len,
1745                                       sum, off);
1746                 kunmap_atomic(p);
1747                 off += v.bv_len;
1748         }),({
1749                 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1750                                       v.iov_base, v.iov_len,
1751                                       sum, off);
1752                 off += v.iov_len;
1753         }), ({
1754                 char *p = kmap_atomic(v.bv_page);
1755                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1756                                       p + v.bv_offset, v.bv_len,
1757                                       sum, off);
1758                 kunmap_atomic(p);
1759                 off += v.bv_len;
1760         })
1761         )
1762         *csum = sum;
1763         iov_iter_advance(i, bytes);
1764         return true;
1765 }
1766 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1767
1768 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1769                              struct iov_iter *i)
1770 {
1771         struct csum_state *csstate = _csstate;
1772         const char *from = addr;
1773         __wsum sum, next;
1774         size_t off;
1775
1776         if (unlikely(iov_iter_is_pipe(i)))
1777                 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1778
1779         sum = csstate->csum;
1780         off = csstate->off;
1781         if (unlikely(iov_iter_is_discard(i))) {
1782                 WARN_ON(1);     /* for now */
1783                 return 0;
1784         }
1785         iterate_and_advance(i, bytes, v, ({
1786                 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1787                                              v.iov_base,
1788                                              v.iov_len);
1789                 if (next) {
1790                         sum = csum_block_add(sum, next, off);
1791                         off += v.iov_len;
1792                 }
1793                 next ? 0 : v.iov_len;
1794         }), ({
1795                 char *p = kmap_atomic(v.bv_page);
1796                 sum = csum_and_memcpy(p + v.bv_offset,
1797                                       (from += v.bv_len) - v.bv_len,
1798                                       v.bv_len, sum, off);
1799                 kunmap_atomic(p);
1800                 off += v.bv_len;
1801         }),({
1802                 sum = csum_and_memcpy(v.iov_base,
1803                                      (from += v.iov_len) - v.iov_len,
1804                                      v.iov_len, sum, off);
1805                 off += v.iov_len;
1806         }), ({
1807                 char *p = kmap_atomic(v.bv_page);
1808                 sum = csum_and_memcpy(p + v.bv_offset,
1809                                       (from += v.bv_len) - v.bv_len,
1810                                       v.bv_len, sum, off);
1811                 kunmap_atomic(p);
1812                 off += v.bv_len;
1813         })
1814         )
1815         csstate->csum = sum;
1816         csstate->off = off;
1817         return bytes;
1818 }
1819 EXPORT_SYMBOL(csum_and_copy_to_iter);
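/*
 * Example (sketch): the opaque @_csstate is a struct csum_state carrying
 * the running checksum and the offset of the copied data within the
 * larger message, so the helper can be called repeatedly:
 *
 *	struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *	csum_and_copy_to_iter(hdr, hdrlen, &csstate, iter);
 *	csum_and_copy_to_iter(body, bodylen, &csstate, iter);
 *
 * csstate.csum then covers both ranges at their correct offsets.
 */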
1820
1821 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1822                 struct iov_iter *i)
1823 {
1824 #ifdef CONFIG_CRYPTO_HASH
1825         struct ahash_request *hash = hashp;
1826         struct scatterlist sg;
1827         size_t copied;
1828
1829         copied = copy_to_iter(addr, bytes, i);
1830         sg_init_one(&sg, addr, copied);
1831         ahash_request_set_crypt(hash, &sg, NULL, copied);
1832         crypto_ahash_update(hash);
1833         return copied;
1834 #else
1835         return 0;
1836 #endif
1837 }
1838 EXPORT_SYMBOL(hash_and_copy_to_iter);
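/*
 * Example (hedged sketch): @hashp must be an already-prepared
 * ahash_request; the bytes that reach the iterator are also fed to
 * crypto_ahash_update().  Allocation and initialisation of "req" are
 * assumed:
 *
 *	copied = hash_and_copy_to_iter(buf, len, req, iter);
 */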
1839
1840 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1841 {
1842         size_t size = i->count;
1843         int npages = 0;
1844
1845         if (!size)
1846                 return 0;
1847         if (unlikely(iov_iter_is_discard(i)))
1848                 return 0;
1849
1850         if (unlikely(iov_iter_is_pipe(i))) {
1851                 struct pipe_inode_info *pipe = i->pipe;
1852                 unsigned int iter_head;
1853                 size_t off;
1854
1855                 if (!sanity(i))
1856                         return 0;
1857
1858                 data_start(i, &iter_head, &off);
1859                 /* some of this one + all after this one */
1860                 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1861                 if (npages >= maxpages)
1862                         return maxpages;
1863         } else if (unlikely(iov_iter_is_xarray(i))) {
1864                 unsigned offset;
1865
1866                 offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
1867
1868                 npages = 1;
1869                 if (size > PAGE_SIZE - offset) {
1870                         size -= PAGE_SIZE - offset;
1871                         npages += size >> PAGE_SHIFT;
1872                         size &= ~PAGE_MASK;
1873                         if (size)
1874                                 npages++;
1875                 }
1876                 if (npages >= maxpages)
1877                         return maxpages;
1878         } else iterate_all_kinds(i, size, v, ({
1879                 unsigned long p = (unsigned long)v.iov_base;
1880                 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1881                         - p / PAGE_SIZE;
1882                 if (npages >= maxpages)
1883                         return maxpages;
1884         0;}),({
1885                 npages++;
1886                 if (npages >= maxpages)
1887                         return maxpages;
1888         }),({
1889                 unsigned long p = (unsigned long)v.iov_base;
1890                 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1891                         - p / PAGE_SIZE;
1892                 if (npages >= maxpages)
1893                         return maxpages;
1894         }),
1895         0
1896         )
1897         return npages;
1898 }
1899 EXPORT_SYMBOL(iov_iter_npages);
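/*
 * Example (sketch): a direct-I/O path sizing its bio by the number of
 * page fragments the iterator will produce, capped at BIO_MAX_VECS:
 *
 *	int nr_pages = iov_iter_npages(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
 */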
1900
1901 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1902 {
1903         *new = *old;
1904         if (unlikely(iov_iter_is_pipe(new))) {
1905                 WARN_ON(1);
1906                 return NULL;
1907         }
1908         if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1909                 return NULL;
1910         if (iov_iter_is_bvec(new))
1911                 return new->bvec = kmemdup(new->bvec,
1912                                     new->nr_segs * sizeof(struct bio_vec),
1913                                     flags);
1914         else
1915                 /* iovec and kvec have identical layout */
1916                 return new->iov = kmemdup(new->iov,
1917                                    new->nr_segs * sizeof(struct iovec),
1918                                    flags);
1919 }
1920 EXPORT_SYMBOL(dup_iter);
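/*
 * Example (sketch): snapshotting an iterator so an asynchronous
 * completion can replay it.  The return value is the duplicated segment
 * array (NULL on allocation failure or for iterator types that cannot
 * be duplicated) and must eventually be kfree()d:
 *
 *	struct iov_iter copy;
 *	const void *segs = dup_iter(&copy, iter, GFP_KERNEL);
 *
 *	if (!segs)
 *		return -ENOMEM;
 *	... use copy, then ...
 *	kfree(segs);
 */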
1921
1922 static int copy_compat_iovec_from_user(struct iovec *iov,
1923                 const struct iovec __user *uvec, unsigned long nr_segs)
1924 {
1925         const struct compat_iovec __user *uiov =
1926                 (const struct compat_iovec __user *)uvec;
1927         int ret = -EFAULT, i;
1928
1929         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1930                 return -EFAULT;
1931
1932         for (i = 0; i < nr_segs; i++) {
1933                 compat_uptr_t buf;
1934                 compat_ssize_t len;
1935
1936                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1937                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1938
1939                 /* check for compat_size_t not fitting in compat_ssize_t ... */
1940                 if (len < 0) {
1941                         ret = -EINVAL;
1942                         goto uaccess_end;
1943                 }
1944                 iov[i].iov_base = compat_ptr(buf);
1945                 iov[i].iov_len = len;
1946         }
1947
1948         ret = 0;
1949 uaccess_end:
1950         user_access_end();
1951         return ret;
1952 }
1953
1954 static int copy_iovec_from_user(struct iovec *iov,
1955                 const struct iovec __user *uvec, unsigned long nr_segs)
1956 {
1957         unsigned long seg;
1958
1959         if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1960                 return -EFAULT;
1961         for (seg = 0; seg < nr_segs; seg++) {
1962                 if ((ssize_t)iov[seg].iov_len < 0)
1963                         return -EINVAL;
1964         }
1965
1966         return 0;
1967 }
1968
1969 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1970                 unsigned long nr_segs, unsigned long fast_segs,
1971                 struct iovec *fast_iov, bool compat)
1972 {
1973         struct iovec *iov = fast_iov;
1974         int ret;
1975
1976         /*
1977          * SuS says "The readv() function *may* fail if the iovcnt argument was
1978          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1979          * traditionally returned zero for zero segments, so...
1980          */
1981         if (nr_segs == 0)
1982                 return iov;
1983         if (nr_segs > UIO_MAXIOV)
1984                 return ERR_PTR(-EINVAL);
1985         if (nr_segs > fast_segs) {
1986                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1987                 if (!iov)
1988                         return ERR_PTR(-ENOMEM);
1989         }
1990
1991         if (compat)
1992                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1993         else
1994                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1995         if (ret) {
1996                 if (iov != fast_iov)
1997                         kfree(iov);
1998                 return ERR_PTR(ret);
1999         }
2000
2001         return iov;
2002 }
2003
2004 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
2005                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
2006                  struct iov_iter *i, bool compat)
2007 {
2008         ssize_t total_len = 0;
2009         unsigned long seg;
2010         struct iovec *iov;
2011
2012         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
2013         if (IS_ERR(iov)) {
2014                 *iovp = NULL;
2015                 return PTR_ERR(iov);
2016         }
2017
2018         /*
2019          * According to the Single Unix Specification we should return EINVAL if
2020          * an element length is < 0 when cast to ssize_t or if the total length
2021          * would overflow the ssize_t return value of the system call.
2022          *
2023          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
2024          * overflow case.
2025          */
2026         for (seg = 0; seg < nr_segs; seg++) {
2027                 ssize_t len = (ssize_t)iov[seg].iov_len;
2028
2029                 if (!access_ok(iov[seg].iov_base, len)) {
2030                         if (iov != *iovp)
2031                                 kfree(iov);
2032                         *iovp = NULL;
2033                         return -EFAULT;
2034                 }
2035
2036                 if (len > MAX_RW_COUNT - total_len) {
2037                         len = MAX_RW_COUNT - total_len;
2038                         iov[seg].iov_len = len;
2039                 }
2040                 total_len += len;
2041         }
2042
2043         iov_iter_init(i, type, iov, nr_segs, total_len);
2044         if (iov == *iovp)
2045                 *iovp = NULL;
2046         else
2047                 *iovp = iov;
2048         return total_len;
2049 }
2050
2051 /**
2052  * import_iovec() - Copy an array of &struct iovec from userspace
2053  *     into the kernel, check that it is valid, and initialize a new
2054  *     &struct iov_iter iterator to access it.
2055  *
2056  * @type: One of %READ or %WRITE.
2057  * @uvec: Pointer to the userspace array.
2058  * @nr_segs: Number of elements in userspace array.
2059  * @fast_segs: Number of elements in *@iovp.
2060  * @iovp: (input and output parameter) Pointer to pointer to (usually small
2061  *     on-stack) kernel array.
2062  * @i: Pointer to iterator that will be initialized on success.
2063  *
2064  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
2065  * then this function places %NULL in *@iovp on return. Otherwise, a new
2066  * array will be allocated and the result placed in *@iovp. This means that
2067  * the caller may call kfree() on *@iovp regardless of whether the small
2068  * on-stack array was used or not (and regardless of whether this function
2069  * returns an error or not).
2070  *
2071  * Return: Negative error code on error, bytes imported on success
2072  */
2073 ssize_t import_iovec(int type, const struct iovec __user *uvec,
2074                  unsigned nr_segs, unsigned fast_segs,
2075                  struct iovec **iovp, struct iov_iter *i)
2076 {
2077         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2078                               in_compat_syscall());
2079 }
2080 EXPORT_SYMBOL(import_iovec);
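/*
 * Example (sketch modelled on readv()-style entry points): import with
 * a small on-stack fast array, then free unconditionally:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... do the I/O through &iter ...
 *	kfree(iov);
 *
 * The unconditional kfree() is safe because *iovp is set to %NULL
 * whenever the on-stack array was used.
 */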
2081
2082 int import_single_range(int rw, void __user *buf, size_t len,
2083                  struct iovec *iov, struct iov_iter *i)
2084 {
2085         if (len > MAX_RW_COUNT)
2086                 len = MAX_RW_COUNT;
2087         if (unlikely(!access_ok(buf, len)))
2088                 return -EFAULT;
2089
2090         iov->iov_base = buf;
2091         iov->iov_len = len;
2092         iov_iter_init(i, rw, iov, 1, len);
2093         return 0;
2094 }
2095 EXPORT_SYMBOL(import_single_range);
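/*
 * Example: the single-buffer analogue of import_iovec(), as used by
 * plain read()/write() style paths; @iov must stay live for as long as
 * the iterator is in use:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_single_range(WRITE, buf, len, &iov, &iter);
 *	if (unlikely(ret))
 *		return ret;
 */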
2096
2097 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
2098                             int (*f)(struct kvec *vec, void *context),
2099                             void *context)
2100 {
2101         struct kvec w;
2102         int err = -EINVAL;
2103         if (!bytes)
2104                 return 0;
2105
2106         iterate_all_kinds(i, bytes, v, -EINVAL, ({
2107                 w.iov_base = kmap(v.bv_page) + v.bv_offset;
2108                 w.iov_len = v.bv_len;
2109                 err = f(&w, context);
2110                 kunmap(v.bv_page);
2111                 err;}), ({
2112                 w = v;
2113                 err = f(&w, context);}), ({
2114                 w.iov_base = kmap(v.bv_page) + v.bv_offset;
2115                 w.iov_len = v.bv_len;
2116                 err = f(&w, context);
2117                 kunmap(v.bv_page);
2118                 err;})
2119         )
2120         return err;
2121 }
2122 EXPORT_SYMBOL(iov_iter_for_each_range);
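/*
 * Example (sketch; the callback name is hypothetical): the helper maps
 * each bvec or kvec segment and hands it to @f as a kvec; user-space
 * iovecs are not supported and yield -EINVAL:
 *
 *	static int count_bytes(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = iov_iter_for_each_range(iter, bytes, count_bytes, &total);
 */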