// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

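/*
 * Note on the iterate_* helpers below: each macro walks one iterator
 * flavour chunk by chunk, binding @base to the current chunk's address
 * and @len to its length, and evaluating STEP once per chunk.  STEP
 * must yield the number of bytes it failed to process (see the
 * "len -= (STEP)" and "left = (STEP)" lines), so 0 means the whole
 * chunk was consumed and any short step terminates the walk early.
 */
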
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct folio *folio;                                    \
        loff_t start = i->xarray_start + i->iov_offset;         \
        pgoff_t index = start / PAGE_SIZE;                      \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        len = PAGE_SIZE - offset_in_page(start);                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, folio, ULONG_MAX) {                  \
                unsigned left;                                  \
                size_t offset;                                  \
                if (xas_retry(&xas, folio))                     \
                        continue;                               \
                if (WARN_ON(xa_is_value(folio)))                \
                        break;                                  \
                if (WARN_ON(folio_test_hugetlb(folio)))         \
                        break;                                  \
                offset = offset_in_folio(folio, start + __off); \
                while (offset < folio_size(folio)) {            \
                        base = kmap_local_folio(folio, offset); \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(base);                     \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset += len;                          \
                        len = PAGE_SIZE;                        \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_iovec(i))) {                 \
                        const struct iovec *iov = i->iov;       \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

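/*
 * copyout()/copyin() follow the raw_copy_{to,from}_user() convention:
 * they return the number of bytes that could NOT be transferred, so 0
 * means complete success.  should_fail_usercopy() lets fault injection
 * report the whole range as uncopied, and a failed access_ok() check
 * does the same.
 */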
static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user(to, from, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[i_head & p_mask];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head = i->head;
        size_t off;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        buf = &pipe->bufs[i_head & p_mask];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                i_head++;
                buf = &pipe->bufs[i_head & p_mask];
        }
        if (pipe_full(i_head, p_tail, pipe->max_usage))
                return 0;

        buf->ops = &page_cache_pipe_buf_ops;
        buf->flags = 0;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = bytes;

        pipe->head = i_head + 1;
        i->iov_offset = offset + bytes;
        i->head = i_head;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
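
/*
 * Typical use (an illustrative sketch modelled on a buffered-write loop;
 * @page, @offset and @bytes are hypothetical): fault the user pages in
 * up front, then copy with page faults disabled and retry on a short
 * copy:
 *
 *      if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
 *              return -EFAULT;
 *      ...
 *      copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *      // on copied < bytes: unlock, fault in again, retry
 */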

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .nofault = false,
                .data_source = direction,
                .iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);
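
/*
 * Example (illustrative sketch): wrapping a single user buffer so data
 * can be copied out to it; @ubuf and @len are hypothetical caller
 * variables:
 *
 *      struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, READ, &iov, 1, len);
 *      // the READ direction marks the iterator as a destination,
 *      // to be filled with copy_to_iter() and friends
 */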

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
                              unsigned int *iter_headp, size_t *offp)
{
        unsigned int p_mask = i->pipe->ring_size - 1;
        unsigned int iter_head = i->head;
        size_t off = i->iov_offset;

        if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
                    off == PAGE_SIZE)) {
                iter_head++;
                off = 0;
        }
        *iter_headp = iter_head;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *iter_headp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int iter_head;
        size_t off;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &iter_head, &off);
        *iter_headp = iter_head;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[iter_head & p_mask].len += size;
                        return size;
                }
                pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
                iter_head++;
        }
        while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
                struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;

                buf->ops = &default_pipe_buf_ops;
                buf->flags = 0;
                buf->page = page;
                buf->offset = 0;
                buf->len = min_t(ssize_t, left, PAGE_SIZE);
                left -= buf->len;
                iter_head++;
                pipe->head = iter_head;

                if (left == 0)
                        return size;
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;
        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int i_head;
        size_t r;

        if (!sanity(i))
                return 0;

        bytes = push_pipe(i, bytes, &i_head, &r);
        while (bytes) {
                size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = r + chunk;
                bytes -= chunk;
                off += chunk;
                r = 0;
                i_head++;
        }
        *sump = sum;
        i->count -= off;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
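
/*
 * Callers normally reach _copy_to_iter() through the copy_to_iter()
 * wrapper in <linux/uio.h>, which performs a check_copy_size() sanity
 * check first.  The return value is the number of bytes actually
 * copied and may be short if a user page faults mid-copy.
 */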

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off, xfer = 0;

        if (!sanity(i))
                return 0;

        n = push_pipe(i, bytes, &i_head, &off);
        while (n) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                unsigned long rem;
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = off + chunk;
                xfer += chunk;
                if (rem)
                        break;
                n -= chunk;
                off = 0;
                i_head++;
        }
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and a typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs the page order to compute the page
         * size.  However, we mostly deal with order-0 pages and thus
         * can avoid a possible cache line miss for requests that fit
         * all page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (likely(iter_is_iovec(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
        if (iov_iter_is_pipe(i))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        if (unlikely(iov_iter_is_discard(i))) {
                if (unlikely(i->count < bytes))
                        bytes = i->count;
                i->count -= bytes;
                return bytes;
        }
        WARN_ON(1);
        return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                size_t n = __copy_page_to_iter(page, offset,
                                min(bytes, (size_t)PAGE_SIZE - offset), i);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
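
/*
 * Example (illustrative sketch): a read path copying out of the page
 * cache; @page, @offset and @count are hypothetical:
 *
 *      size_t copied = copy_page_to_iter(page, offset, count, iter);
 *
 *      if (copied < count) {
 *              // short copy: the destination faulted or ran out of
 *              // space; callers typically stop and return 'copied'
 *      }
 */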

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (likely(iter_is_iovec(i)))
                return copy_page_from_iter_iovec(page, offset, bytes, i);
        if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
                void *kaddr = kmap_local_page(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_local(kaddr);
                return wanted;
        }
        WARN_ON(1);
        return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        unsigned int i_head;
        size_t n, off;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &i_head, &off);
        if (unlikely(!n))
                return 0;

        do {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
                i->head = i_head;
                i->iov_offset = off + chunk;
                n -= chunk;
                off = 0;
                i_head++;
        } while (n);
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy(p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_tail = pipe->tail;
        unsigned int p_head = pipe->head;
        unsigned int p_mask = pipe->ring_size - 1;

        if (!pipe_empty(p_head, p_tail)) {
                struct pipe_buffer *buf;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;

                if (off) {
                        buf = &pipe->bufs[i_head & p_mask];
                        buf->len = off - buf->offset;
                        i_head++;
                }
                while (p_head != i_head) {
                        p_head--;
                        pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
                }

                pipe->head = p_head;
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (size) {
                struct pipe_buffer *buf;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset, left = size;

                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[i_head & p_mask].offset;
                while (1) {
                        buf = &pipe->bufs[i_head & p_mask];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        i_head++;
                }
                i->head = i_head;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        struct bvec_iter bi;

        bi.bi_size = i->count;
        bi.bi_bvec_done = i->iov_offset;
        bi.bi_idx = 0;
        bvec_iter_advance(i->bvec, &bi, size);

        i->bvec += bi.bi_idx;
        i->nr_segs -= bi.bi_idx;
        i->count = bi.bi_size;
        i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);
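
/*
 * Example (illustrative sketch): consuming whatever a lower layer
 * reported as transferred; @done is hypothetical:
 *
 *      iov_iter_advance(iter, done);
 *      // count, the segment pointer and iov_offset now reflect the
 *      // consumed bytes; iov_iter_count(iter) tells how much is left
 */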

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int p_mask = pipe->ring_size - 1;
                unsigned int i_head = i->head;
                size_t off = i->iov_offset;
                while (1) {
                        struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
                        size_t n = off - b->offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && i_head == i->start_head) {
                                off = 0;
                                break;
                        }
                        i_head--;
                        b = &pipe->bufs[i_head & p_mask];
                        off = b->offset + b->len;
                }
                i->iov_offset = off;
                i->head = i_head;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
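
/*
 * iov_iter_revert() is the inverse of iov_iter_advance(): a caller that
 * advanced the iterator optimistically (e.g. before an operation that
 * then failed part-way) can walk it backwards by @unroll bytes.  For
 * ITER_PIPE the buffers past the restored position are also released
 * via pipe_truncate().
 */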

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, i->iov->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_kvec);
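
/*
 * Example (illustrative sketch): describing a kernel-space source
 * buffer for a write; @kbuf and @len are hypothetical:
 *
 *      struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_kvec(&iter, WRITE, &kv, 1, len);
 */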

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_bvec);
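
/*
 * Example (illustrative sketch): describing page fragments, e.g. for a
 * zero-copy send; @page, @off and @len are hypothetical:
 *
 *      struct bio_vec bv = {
 *              .bv_page = page, .bv_offset = off, .bv_len = len,
 *      };
 *      struct iov_iter iter;
 *
 *      iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 */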

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        *i = (struct iov_iter){
                .iter_type = ITER_PIPE,
                .data_source = false,
                .pipe = pipe,
                .head = pipe->head,
                .start_head = pipe->head,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from evaporating, either by taking a ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_xarray);
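
/*
 * Example (illustrative sketch): walking an inode's page cache, where
 * @mapping, @pos and @count are hypothetical:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
 *      // the caller must already hold refs on (or locks over) the
 *      // pages in [pos, pos + count)
 */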

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                .data_source = false,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_discard);
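
/*
 * Example (illustrative sketch): draining @count bytes that must be
 * consumed but that nobody wants to keep:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_discard(&iter, READ, count);
 *      // a subsequent copy_to_iter()/copy_page_to_iter() into &iter
 *      // throws the payload away while still advancing the count
 */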

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;
                if (len) {
                        res |= (unsigned long)i->iov[k].iov_base + skip;
                        if (len > size)
                                len = size;
                        res |= len;
                        size -= len;
                        if (!size)
                                break;
                }
        }
        return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
        unsigned res = 0;
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                if (len > size)
                        len = size;
                res |= len;
                size -= len;
                if (!size)
                        break;
        }
        return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_alignment_iovec(i);

        if (iov_iter_is_bvec(i))
                return iov_iter_alignment_bvec(i);

        if (iov_iter_is_pipe(i)) {
                unsigned int p_mask = i->pipe->ring_size - 1;
                size_t size = i->count;

                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
                        return size | i->iov_offset;
                return size;
        }

        if (iov_iter_is_xarray(i))
                return (i->xarray_start + i->iov_offset) | i->count;

        return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
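
/*
 * Example (illustrative sketch): direct I/O paths typically reject an
 * iterator whose addresses or lengths are not block-aligned;
 * @blocksize_mask is hypothetical:
 *
 *      if (iov_iter_alignment(iter) & blocksize_mask)
 *              return -EINVAL;
 */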

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        unsigned long v = 0;
        size_t size = i->count;
        unsigned k;

        if (WARN_ON(!iter_is_iovec(i)))
                return ~0U;

        for (k = 0; k < i->nr_segs; k++) {
                if (i->iov[k].iov_len) {
                        unsigned long base = (unsigned long)i->iov[k].iov_base;
                        if (v) // if not the first one
                                res |= base | v; // this start | previous end
                        v = base + i->iov[k].iov_len;
                        if (size <= i->iov[k].iov_len)
                                break;
                        size -= i->iov[k].iov_len;
                }
        }
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int iter_head,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
        ssize_t n = push_pipe(i, maxsize, &iter_head, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
                iter_head++;
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned int iter_head, npages;
        size_t capacity;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &iter_head, start);
        /* Amount of free space: some of this one + all after this one */
        npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

1404 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1405                                           pgoff_t index, unsigned int nr_pages)
1406 {
1407         XA_STATE(xas, xa, index);
1408         struct page *page;
1409         unsigned int ret = 0;
1410
1411         rcu_read_lock();
1412         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1413                 if (xas_retry(&xas, page))
1414                         continue;
1415
1416                 /* Has the page moved or been split? */
1417                 if (unlikely(page != xas_reload(&xas))) {
1418                         xas_reset(&xas);
1419                         continue;
1420                 }
1421
1422                 pages[ret] = find_subpage(page, xas.xa_index);
1423                 get_page(pages[ret]);
1424                 if (++ret == nr_pages)
1425                         break;
1426         }
1427         rcu_read_unlock();
1428         return ret;
1429 }
1430
1431 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1432                                      struct page **pages, size_t maxsize,
1433                                      unsigned maxpages, size_t *_start_offset)
1434 {
1435         unsigned nr, offset;
1436         pgoff_t index, count;
1437         size_t size = maxsize;
1438         loff_t pos;
1439
1440         if (!size || !maxpages)
1441                 return 0;
1442
1443         pos = i->xarray_start + i->iov_offset;
1444         index = pos >> PAGE_SHIFT;
1445         offset = pos & ~PAGE_MASK;
1446         *_start_offset = offset;
1447
1448         count = 1;
1449         if (size > PAGE_SIZE - offset) {
1450                 size -= PAGE_SIZE - offset;
1451                 count += size >> PAGE_SHIFT;
1452                 size &= ~PAGE_MASK;
1453                 if (size)
1454                         count++;
1455         }
1456
1457         if (count > maxpages)
1458                 count = maxpages;
1459
1460         nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1461         if (nr == 0)
1462                 return 0;
1463
1464         return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1465 }
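
/*
 * Editor's note (illustrative, not in the original source): a worked
 * example of the page-count arithmetic in iter_xarray_get_pages(). With
 * PAGE_SIZE == 4096, pos == 0x1300 and maxsize == 0x2100 give
 * offset == 0x300; the first page supplies 0xd00 bytes, the remaining
 * 0x1400 bytes cover one full page plus a final partial page, so
 * count == 3.
 */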
1466
1467 /* must be called on a non-empty ITER_IOVEC iterator */
1468 static unsigned long first_iovec_segment(const struct iov_iter *i,
1469                                          size_t *size, size_t *start,
1470                                          size_t maxsize, unsigned maxpages)
1471 {
1472         size_t skip;
1473         long k;
1474
1475         for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1476                 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1477                 size_t len = i->iov[k].iov_len - skip;
1478
1479                 if (unlikely(!len))
1480                         continue;
1481                 if (len > maxsize)
1482                         len = maxsize;
1483                 len += (*start = addr % PAGE_SIZE);
1484                 if (len > maxpages * PAGE_SIZE)
1485                         len = maxpages * PAGE_SIZE;
1486                 *size = len;
1487                 return addr & PAGE_MASK;
1488         }
1489         BUG(); // if it had been empty, we wouldn't get called
1490 }
1491
1492 /* must be called on a non-empty ITER_BVEC iterator */
1493 static struct page *first_bvec_segment(const struct iov_iter *i,
1494                                        size_t *size, size_t *start,
1495                                        size_t maxsize, unsigned maxpages)
1496 {
1497         struct page *page;
1498         size_t skip = i->iov_offset, len;
1499
1500         len = i->bvec->bv_len - skip;
1501         if (len > maxsize)
1502                 len = maxsize;
1503         skip += i->bvec->bv_offset;
1504         page = i->bvec->bv_page + skip / PAGE_SIZE;
1505         len += (*start = skip % PAGE_SIZE);
1506         if (len > maxpages * PAGE_SIZE)
1507                 len = maxpages * PAGE_SIZE;
1508         *size = len;
1509         return page;
1510 }
1511
1512 ssize_t iov_iter_get_pages(struct iov_iter *i,
1513                    struct page **pages, size_t maxsize, unsigned maxpages,
1514                    size_t *start)
1515 {
1516         size_t len;
1517         int n, res;
1518
1519         if (maxsize > i->count)
1520                 maxsize = i->count;
1521         if (!maxsize)
1522                 return 0;
1523
1524         if (likely(iter_is_iovec(i))) {
1525                 unsigned int gup_flags = 0;
1526                 unsigned long addr;
1527
1528                 if (iov_iter_rw(i) != WRITE)
1529                         gup_flags |= FOLL_WRITE;
1530                 if (i->nofault)
1531                         gup_flags |= FOLL_NOFAULT;
1532
1533                 addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
1534                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1535                 res = get_user_pages_fast(addr, n, gup_flags, pages);
1536                 if (unlikely(res <= 0))
1537                         return res;
1538                 return (res == n ? len : res * PAGE_SIZE) - *start;
1539         }
1540         if (iov_iter_is_bvec(i)) {
1541                 struct page *page;
1542
1543                 page = first_bvec_segment(i, &len, start, maxsize, maxpages);
1544                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1545                 while (n--)
1546                         get_page(*pages++ = page++);
1547                 return len - *start;
1548         }
1549         if (iov_iter_is_pipe(i))
1550                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1551         if (iov_iter_is_xarray(i))
1552                 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1553         return -EFAULT;
1554 }
1555 EXPORT_SYMBOL(iov_iter_get_pages);
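
/*
 * Editor's sketch (hypothetical, not part of the original file): typical
 * caller-side use of iov_iter_get_pages(). The helper name and the fixed
 * 16-page batch are assumptions for illustration only.
 */
static ssize_t __maybe_unused example_pin_pages(struct iov_iter *i)
{
	struct page *pages[16];
	size_t offset;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &offset);
	if (bytes <= 0)
		return bytes;

	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... perform I/O against pages[0..npages-1] starting at @offset ... */

	/* drop the references iov_iter_get_pages() took for us */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	/* note: the iterator itself has not been advanced */
	return bytes;
}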
1556
1557 static struct page **get_pages_array(size_t n)
1558 {
1559         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1560 }
1561
1562 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1563                    struct page ***pages, size_t maxsize,
1564                    size_t *start)
1565 {
1566         struct page **p;
1567         unsigned int iter_head, npages;
1568         ssize_t n;
1569
1570         if (!sanity(i))
1571                 return -EFAULT;
1572
1573         data_start(i, &iter_head, start);
1574         /* Amount of free space: some of this one + all after this one */
1575         npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1576         n = npages * PAGE_SIZE - *start;
1577         if (maxsize > n)
1578                 maxsize = n;
1579         else
1580                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1581         p = get_pages_array(npages);
1582         if (!p)
1583                 return -ENOMEM;
1584         n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1585         if (n > 0)
1586                 *pages = p;
1587         else
1588                 kvfree(p);
1589         return n;
1590 }
1591
1592 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1593                                            struct page ***pages, size_t maxsize,
1594                                            size_t *_start_offset)
1595 {
1596         struct page **p;
1597         unsigned nr, offset;
1598         pgoff_t index, count;
1599         size_t size = maxsize;
1600         loff_t pos;
1601
1602         if (!size)
1603                 return 0;
1604
1605         pos = i->xarray_start + i->iov_offset;
1606         index = pos >> PAGE_SHIFT;
1607         offset = pos & ~PAGE_MASK;
1608         *_start_offset = offset;
1609
1610         count = 1;
1611         if (size > PAGE_SIZE - offset) {
1612                 size -= PAGE_SIZE - offset;
1613                 count += size >> PAGE_SHIFT;
1614                 size &= ~PAGE_MASK;
1615                 if (size)
1616                         count++;
1617         }
1618
1619         p = get_pages_array(count);
1620         if (!p)
1621                 return -ENOMEM;
1622         *pages = p;
1623
1624         nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1625         if (nr == 0)
1626                 return 0;
1627
1628         return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1629 }
1630
1631 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1632                    struct page ***pages, size_t maxsize,
1633                    size_t *start)
1634 {
1635         struct page **p;
1636         size_t len;
1637         int n, res;
1638
1639         if (maxsize > i->count)
1640                 maxsize = i->count;
1641         if (!maxsize)
1642                 return 0;
1643
1644         if (likely(iter_is_iovec(i))) {
1645                 unsigned int gup_flags = 0;
1646                 unsigned long addr;
1647
1648                 if (iov_iter_rw(i) != WRITE)
1649                         gup_flags |= FOLL_WRITE;
1650                 if (i->nofault)
1651                         gup_flags |= FOLL_NOFAULT;
1652
1653                 addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
1654                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1655                 p = get_pages_array(n);
1656                 if (!p)
1657                         return -ENOMEM;
1658                 res = get_user_pages_fast(addr, n, gup_flags, p);
1659                 if (unlikely(res <= 0)) {
1660                         kvfree(p);
1661                         *pages = NULL;
1662                         return res;
1663                 }
1664                 *pages = p;
1665                 return (res == n ? len : res * PAGE_SIZE) - *start;
1666         }
1667         if (iov_iter_is_bvec(i)) {
1668                 struct page *page;
1669
1670                 page = first_bvec_segment(i, &len, start, maxsize, ~0U);
1671                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1672                 *pages = p = get_pages_array(n);
1673                 if (!p)
1674                         return -ENOMEM;
1675                 while (n--)
1676                         get_page(*p++ = page++);
1677                 return len - *start;
1678         }
1679         if (iov_iter_is_pipe(i))
1680                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1681         if (iov_iter_is_xarray(i))
1682                 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1683         return -EFAULT;
1684 }
1685 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
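
/*
 * Editor's sketch (hypothetical): the allocating variant hands back a
 * kvmalloc'ed page array, so the caller releases both the page references
 * and the array itself. The function name is illustrative.
 */
static ssize_t __maybe_unused example_pin_pages_alloc(struct iov_iter *i,
						      size_t maxsize)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int k;

	bytes = iov_iter_get_pages_alloc(i, &pages, maxsize, &offset);
	if (bytes <= 0)
		return bytes;

	/* ... use the pinned pages ... */

	for (k = 0; k < DIV_ROUND_UP(offset + bytes, PAGE_SIZE); k++)
		put_page(pages[k]);
	kvfree(pages);		/* array came from get_pages_array() */
	return bytes;
}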
1686
1687 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1688                                struct iov_iter *i)
1689 {
1690         __wsum sum, next;
1691         sum = *csum;
1692         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1693                 WARN_ON(1);
1694                 return 0;
1695         }
1696         iterate_and_advance(i, bytes, base, len, off, ({
1697                 next = csum_and_copy_from_user(base, addr + off, len);
1698                 sum = csum_block_add(sum, next, off);
1699                 next ? 0 : len;
1700         }), ({
1701                 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1702         })
1703         )
1704         *csum = sum;
1705         return bytes;
1706 }
1707 EXPORT_SYMBOL(csum_and_copy_from_iter);
1708
1709 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1710                              struct iov_iter *i)
1711 {
1712         struct csum_state *csstate = _csstate;
1713         __wsum sum, next;
1714
1715         if (unlikely(iov_iter_is_discard(i))) {
1716                 WARN_ON(1);     /* for now */
1717                 return 0;
1718         }
1719
1720         sum = csum_shift(csstate->csum, csstate->off);
1721         if (unlikely(iov_iter_is_pipe(i)))
1722                 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1723         else iterate_and_advance(i, bytes, base, len, off, ({
1724                 next = csum_and_copy_to_user(addr + off, base, len);
1725                 sum = csum_block_add(sum, next, off);
1726                 next ? 0 : len;
1727         }), ({
1728                 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1729         })
1730         )
1731         csstate->csum = csum_shift(sum, csstate->off);
1732         csstate->off += bytes;
1733         return bytes;
1734 }
1735 EXPORT_SYMBOL(csum_and_copy_to_iter);
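
/*
 * Editor's sketch (hypothetical): csum_and_copy_to_iter() folds the copied
 * bytes into a running checksum carried in a struct csum_state, so repeated
 * calls accumulate correctly across odd-offset boundaries. The zero seed
 * below is an assumption for illustration.
 */
static size_t __maybe_unused example_csum_copy(const void *buf, size_t len,
					       struct iov_iter *i)
{
	struct csum_state csstate = { .csum = 0, .off = 0 };
	size_t copied;

	copied = csum_and_copy_to_iter(buf, len, &csstate, i);
	/* csstate.csum now covers the csstate.off == copied bytes written */
	return copied;
}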
1736
1737 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1738                 struct iov_iter *i)
1739 {
1740 #ifdef CONFIG_CRYPTO_HASH
1741         struct ahash_request *hash = hashp;
1742         struct scatterlist sg;
1743         size_t copied;
1744
1745         copied = copy_to_iter(addr, bytes, i);
1746         sg_init_one(&sg, addr, copied);
1747         ahash_request_set_crypt(hash, &sg, NULL, copied);
1748         crypto_ahash_update(hash);
1749         return copied;
1750 #else
1751         return 0;
1752 #endif
1753 }
1754 EXPORT_SYMBOL(hash_and_copy_to_iter);
1755
1756 static int iov_npages(const struct iov_iter *i, int maxpages)
1757 {
1758         size_t skip = i->iov_offset, size = i->count;
1759         const struct iovec *p;
1760         int npages = 0;
1761
1762         for (p = i->iov; size; skip = 0, p++) {
1763                 unsigned offs = offset_in_page(p->iov_base + skip);
1764                 size_t len = min(p->iov_len - skip, size);
1765
1766                 if (len) {
1767                         size -= len;
1768                         npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1769                         if (unlikely(npages > maxpages))
1770                                 return maxpages;
1771                 }
1772         }
1773         return npages;
1774 }
1775
1776 static int bvec_npages(const struct iov_iter *i, int maxpages)
1777 {
1778         size_t skip = i->iov_offset, size = i->count;
1779         const struct bio_vec *p;
1780         int npages = 0;
1781
1782         for (p = i->bvec; size; skip = 0, p++) {
1783                 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1784                 size_t len = min(p->bv_len - skip, size);
1785
1786                 size -= len;
1787                 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1788                 if (unlikely(npages > maxpages))
1789                         return maxpages;
1790         }
1791         return npages;
1792 }
1793
1794 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1795 {
1796         if (unlikely(!i->count))
1797                 return 0;
1798         /* iovec and kvec have identical layouts */
1799         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1800                 return iov_npages(i, maxpages);
1801         if (iov_iter_is_bvec(i))
1802                 return bvec_npages(i, maxpages);
1803         if (iov_iter_is_pipe(i)) {
1804                 unsigned int iter_head;
1805                 int npages;
1806                 size_t off;
1807
1808                 if (!sanity(i))
1809                         return 0;
1810
1811                 data_start(i, &iter_head, &off);
1812                 /* some of this one + all after this one */
1813                 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1814                 return min(npages, maxpages);
1815         }
1816         if (iov_iter_is_xarray(i)) {
1817                 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1818                 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1819                 return min(npages, maxpages);
1820         }
1821         return 0;
1822 }
1823 EXPORT_SYMBOL(iov_iter_npages);
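
/*
 * Editor's sketch (hypothetical): iov_iter_npages() is typically used to
 * size an allocation before mapping an iterator, with the cap doubling as
 * the hard limit. The constant 256 here is an arbitrary stand-in.
 */
static struct page ** __maybe_unused example_alloc_for_iter(struct iov_iter *i)
{
	int npages = iov_iter_npages(i, 256);

	if (!npages)
		return NULL;
	/* never allocates more than the 256-entry cap passed above */
	return kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
}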
1824
1825 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1826 {
1827         *new = *old;
1828         if (unlikely(iov_iter_is_pipe(new))) {
1829                 WARN_ON(1);
1830                 return NULL;
1831         }
1832         if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1833                 return NULL;
1834         if (iov_iter_is_bvec(new))
1835                 return new->bvec = kmemdup(new->bvec,
1836                                     new->nr_segs * sizeof(struct bio_vec),
1837                                     flags);
1838         else
1839                 /* iovec and kvec have identical layout */
1840                 return new->iov = kmemdup(new->iov,
1841                                    new->nr_segs * sizeof(struct iovec),
1842                                    flags);
1843 }
1844 EXPORT_SYMBOL(dup_iter);
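
/*
 * Editor's sketch (hypothetical): dup_iter() deep-copies the segment array
 * so the duplicate survives the caller's stack; the returned pointer is
 * what must eventually be kfree()d, not the iterator itself.
 */
static int __maybe_unused example_dup(struct iov_iter *src)
{
	struct iov_iter copy;
	const void *segs;

	segs = dup_iter(&copy, src, GFP_KERNEL);
	if (!segs)	/* allocation failed, or an un-duplicable iter type */
		return -ENOMEM;
	/* ... use @copy independently of @src ... */
	kfree(segs);
	return 0;
}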
1845
1846 static int copy_compat_iovec_from_user(struct iovec *iov,
1847                 const struct iovec __user *uvec, unsigned long nr_segs)
1848 {
1849         const struct compat_iovec __user *uiov =
1850                 (const struct compat_iovec __user *)uvec;
1851         int ret = -EFAULT, i;
1852
1853         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1854                 return -EFAULT;
1855
1856         for (i = 0; i < nr_segs; i++) {
1857                 compat_uptr_t buf;
1858                 compat_ssize_t len;
1859
1860                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1861                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1862
1863                 /* check for compat_size_t not fitting in compat_ssize_t ... */
1864                 if (len < 0) {
1865                         ret = -EINVAL;
1866                         goto uaccess_end;
1867                 }
1868                 iov[i].iov_base = compat_ptr(buf);
1869                 iov[i].iov_len = len;
1870         }
1871
1872         ret = 0;
1873 uaccess_end:
1874         user_access_end();
1875         return ret;
1876 }
1877
1878 static int copy_iovec_from_user(struct iovec *iov,
1879                 const struct iovec __user *uvec, unsigned long nr_segs)
1880 {
1881         unsigned long seg;
1882
1883         if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1884                 return -EFAULT;
1885         for (seg = 0; seg < nr_segs; seg++) {
1886                 if ((ssize_t)iov[seg].iov_len < 0)
1887                         return -EINVAL;
1888         }
1889
1890         return 0;
1891 }
1892
1893 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1894                 unsigned long nr_segs, unsigned long fast_segs,
1895                 struct iovec *fast_iov, bool compat)
1896 {
1897         struct iovec *iov = fast_iov;
1898         int ret;
1899
1900         /*
1901          * SuS says "The readv() function *may* fail if the iovcnt argument was
1902          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1903          * traditionally returned zero for zero segments, so...
1904          */
1905         if (nr_segs == 0)
1906                 return iov;
1907         if (nr_segs > UIO_MAXIOV)
1908                 return ERR_PTR(-EINVAL);
1909         if (nr_segs > fast_segs) {
1910                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1911                 if (!iov)
1912                         return ERR_PTR(-ENOMEM);
1913         }
1914
1915         if (compat)
1916                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1917         else
1918                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1919         if (ret) {
1920                 if (iov != fast_iov)
1921                         kfree(iov);
1922                 return ERR_PTR(ret);
1923         }
1924
1925         return iov;
1926 }
1927
1928 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1929                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1930                  struct iov_iter *i, bool compat)
1931 {
1932         ssize_t total_len = 0;
1933         unsigned long seg;
1934         struct iovec *iov;
1935
1936         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1937         if (IS_ERR(iov)) {
1938                 *iovp = NULL;
1939                 return PTR_ERR(iov);
1940         }
1941
1942         /*
1943          * According to the Single Unix Specification we should return EINVAL if
1944          * an element length is < 0 when cast to ssize_t or if the total length
1945          * would overflow the ssize_t return value of the system call.
1946          *
1947          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1948          * overflow case.
1949          */
1950         for (seg = 0; seg < nr_segs; seg++) {
1951                 ssize_t len = (ssize_t)iov[seg].iov_len;
1952
1953                 if (!access_ok(iov[seg].iov_base, len)) {
1954                         if (iov != *iovp)
1955                                 kfree(iov);
1956                         *iovp = NULL;
1957                         return -EFAULT;
1958                 }
1959
1960                 if (len > MAX_RW_COUNT - total_len) {
1961                         len = MAX_RW_COUNT - total_len;
1962                         iov[seg].iov_len = len;
1963                 }
1964                 total_len += len;
1965         }
1966
1967         iov_iter_init(i, type, iov, nr_segs, total_len);
1968         if (iov == *iovp)
1969                 *iovp = NULL;
1970         else
1971                 *iovp = iov;
1972         return total_len;
1973 }
1974
1975 /**
1976  * import_iovec() - Copy an array of &struct iovec from userspace
1977  *     into the kernel, check that it is valid, and initialize a new
1978  *     &struct iov_iter iterator to access it.
1979  *
1980  * @type: One of %READ or %WRITE.
1981  * @uvec: Pointer to the userspace array.
1982  * @nr_segs: Number of elements in userspace array.
1983  * @fast_segs: Number of elements in @iov.
1984  * @iovp: (input and output parameter) Pointer to pointer to (usually small
1985  *     on-stack) kernel array.
1986  * @i: Pointer to iterator that will be initialized on success.
1987  *
1988  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1989  * then this function places %NULL in *@iovp on return. Otherwise, a new
1990  * array will be allocated and the result placed in *@iovp. This means that
1991  * the caller may call kfree() on *@iovp regardless of whether the small
1992  * on-stack array was used or not (and regardless of whether this function
1993  * returns an error or not).
1994  *
1995  * Return: Negative error code on error, bytes imported on success
1996  */
1997 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1998                  unsigned nr_segs, unsigned fast_segs,
1999                  struct iovec **iovp, struct iov_iter *i)
2000 {
2001         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2002                               in_compat_syscall());
2003 }
2004 EXPORT_SYMBOL(import_iovec);
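
/*
 * Editor's sketch (hypothetical): the canonical import_iovec() calling
 * pattern from a read-style syscall. Whatever ends up in @iov is safe to
 * kfree() whether or not the on-stack fast array was used.
 */
static ssize_t __maybe_unused example_import(const struct iovec __user *uvec,
					     unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... feed &iter to the actual I/O path ... */
	kfree(iov);
	return ret;
}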
2005
2006 int import_single_range(int rw, void __user *buf, size_t len,
2007                  struct iovec *iov, struct iov_iter *i)
2008 {
2009         if (len > MAX_RW_COUNT)
2010                 len = MAX_RW_COUNT;
2011         if (unlikely(!access_ok(buf, len)))
2012                 return -EFAULT;
2013
2014         iov->iov_base = buf;
2015         iov->iov_len = len;
2016         iov_iter_init(i, rw, iov, 1, len);
2017         return 0;
2018 }
2019 EXPORT_SYMBOL(import_single_range);
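
/*
 * Editor's sketch (hypothetical): import_single_range() for the common
 * single-buffer case; the iovec must stay live as long as the iterator
 * does, so callers keep the two side by side.
 */
static int __maybe_unused example_single(void __user *buf, size_t len,
					 struct iov_iter *iter,
					 struct iovec *iov)
{
	int ret = import_single_range(WRITE, buf, len, iov, iter);

	if (unlikely(ret))
		return ret;	/* -EFAULT: range failed access_ok() */
	/* iter now describes [buf, buf + min(len, MAX_RW_COUNT)) */
	return 0;
}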
2020
2021 /**
2022  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
2023  *     iov_iter_save_state() was called.
2024  *
2025  * @i: &struct iov_iter to restore
2026  * @state: state to restore from
2027  *
2028  * Used after iov_iter_save_state() to restore @i, if operations may
2029  * have advanced it.
2030  *
2031  * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
2032  */
2033 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
2034 {
2035         if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
2036                          !iov_iter_is_kvec(i)))
2037                 return;
2038         i->iov_offset = state->iov_offset;
2039         i->count = state->count;
2040         /*
2041          * For the *vec iters, nr_segs + iov is constant - if we increment
2042          * the vec, then we also decrement the nr_segs count. Hence we don't
2043          * need to track both of these, just one is enough and we can deduce
2044          * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
2045          * size, so we can just increment the iov pointer as they are unionized.
2046          * ITER_BVEC _may_ be the same size on some archs, but on others it is
2047          * not. Be safe and handle it separately.
2048          */
2049         BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
2050         if (iov_iter_is_bvec(i))
2051                 i->bvec -= state->nr_segs - i->nr_segs;
2052         else
2053                 i->iov -= state->nr_segs - i->nr_segs;
2054         i->nr_segs = state->nr_segs;
2055 }
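
/*
 * Editor's sketch (hypothetical): the save/restore pairing, e.g. around an
 * operation that may consume the iterator and then need to retry.
 */
static size_t __maybe_unused example_retry(void *buf, size_t len,
					   struct iov_iter *i)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(i, &state);
	copied = copy_from_iter(buf, len, i);	/* advances @i */
	if (copied != len) {
		/* rewind to exactly where we started */
		iov_iter_restore(i, &state);
		return 0;
	}
	return copied;
}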