Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 2e452ce..14cae25 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -582,7 +582,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
 {
-       __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+       __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
 }
 
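A quick aside on the csum_and_memcpy() hunk above: the dropped final argument of csum_partial_copy_nocheck() was always a zero seed here, because the partial checksum of each chunk is folded into the running sum afterwards by csum_block_add(), which compensates for the chunk's byte offset (the kernel rotates the 32-bit __wsum by 8 bits when the offset is odd). The userspace toy below illustrates that folding property on plain 16-bit Internet checksums; sum16() and block_add() are ad hoc names for this sketch, not kernel APIs.

/*
 * Userspace toy, not kernel code: shows why a chunk checksum computed
 * with a zero seed can be folded into a running sum later, as
 * csum_and_memcpy() does via csum_block_add().  A chunk that starts at
 * an odd byte offset has its sum byte-swapped before folding.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t seed)
{
	uint32_t s = seed;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		s += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		s += (uint32_t)p[len - 1] << 8;
	while (s >> 16)			/* end-around carry */
		s = (s & 0xffff) + (s >> 16);
	return s;
}

/* fold a chunk sum computed at byte offset @off into @acc */
static uint32_t block_add(uint32_t acc, uint32_t part, size_t off)
{
	if (off & 1)			/* odd offset: swap bytes */
		part = ((part & 0xff) << 8) | (part >> 8);
	acc += part;
	while (acc >> 16)
		acc = (acc & 0xffff) + (acc >> 16);
	return acc;
}

int main(void)
{
	const uint8_t data[] = "example payload for checksum folding";
	size_t split = 7;		/* deliberately odd split point */
	uint32_t whole = sum16(data, sizeof(data), 0);
	uint32_t acc = sum16(data, split, 0);

	acc = block_add(acc, sum16(data + split, sizeof(data) - split, 0), split);
	printf("whole=%#x folded=%#x\n", whole, acc);
	return 0;
}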
@@ -638,30 +638,30 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static int copyout_mc(void __user *to, const void *from, size_t n)
 {
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
-               n = copy_to_user_mcsafe((__force void *) to, from, n);
+               n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
 }
 
-static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+static unsigned long copy_mc_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
 {
        unsigned long ret;
        char *to;
 
        to = kmap_atomic(page);
-       ret = memcpy_mcsafe(to + offset, from, len);
+       ret = copy_mc_to_kernel(to + offset, from, len);
        kunmap_atomic(to);
 
        return ret;
 }
 
-static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
 {
        struct pipe_inode_info *pipe = i->pipe;
@@ -679,7 +679,7 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                unsigned long rem;
 
-               rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
+               rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
                                            off, addr, chunk);
                i->head = i_head;
                i->iov_offset = off + chunk - rem;
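A note on the residue handling in copy_mc_pipe_to_iter() above: copy_mc_to_kernel(), and therefore copy_mc_to_page(), returns the number of bytes left uncopied when poison is consumed, so the pipe position only advances by the bytes that actually landed. A minimal sketch with made-up numbers (plain userspace arithmetic, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long off = 0;		/* offset into the current pipe page */
	unsigned long chunk = 4096;	/* bytes attempted into that page */
	unsigned long rem = 3072;	/* hypothetical: poison hit after 1024 bytes */

	/* mirrors i->iov_offset = off + chunk - rem in the hunk above */
	printf("iov_offset advances to %lu; %lu bytes accounted, copy stops\n",
	       off + chunk - rem, chunk - rem);
	return 0;
}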
@@ -696,18 +696,17 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
 }
 
 /**
- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * _copy_mc_to_iter - copy to iter with source memory error exception handling
  * @addr: source kernel address
  * @bytes: total transfer length
  * @iter: destination iterator
  *
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_to_iter() for protecting read/write to persistent memory.
- * Unless / until an architecture can guarantee identical performance
- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
- * performance regression to switch more users to the mcsafe version.
+ * The pmem driver deploys this for dax reads via dax_copy_to_iter(),
+ * which bypass the page cache and the block layer. Upon #MC, read(2)
+ * aborts and returns EIO or the number of bytes that were
+ * successfully copied.
  *
- * Otherwise, the main differences between this and typical _copy_to_iter().
+ * The main differences between this and typical _copy_to_iter() are:
  *
  * * Typical tail/residue handling after a fault retries the copy
  *   byte-by-byte until the fault happens again. Re-triggering machine
@@ -718,23 +717,22 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
  * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
  *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
  *   a short copy.
- *
- * See MCSAFE_TEST for self-test.
  */
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
        if (unlikely(iov_iter_is_pipe(i)))
-               return copy_pipe_to_iter_mcsafe(addr, bytes, i);
+               return copy_mc_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
-               copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+               copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
+                          v.iov_len),
                ({
-               rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
-                               (from += v.bv_len) - v.bv_len, v.bv_len);
+               rem = copy_mc_to_page(v.bv_page, v.bv_offset,
+                                     (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
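The kernel-doc above spells out the short-copy semantics; a minimal, hypothetical caller sketch follows (the function name and error policy are assumptions of this sketch, not the pmem/dax driver's actual code): a copy that fails before any byte moves becomes -EIO, while a partial copy surfaces as a short read.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>

/* hypothetical caller, illustrating the kernel-doc above */
static ssize_t example_dax_read(const void *kaddr, size_t len,
				struct iov_iter *iter)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, iter);

	if (!copied && len)
		return -EIO;	/* poison hit before any byte was copied */
	return copied;		/* short count => partial read(2) for userspace */
}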
@@ -742,8 +740,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
                }
                }),
                ({
-               rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
-                               v.iov_len);
+               rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
+                                       - v.iov_len, v.iov_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
@@ -754,8 +752,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 
        return bytes;
 }
-EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
-#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
+#endif /* CONFIG_ARCH_HAS_COPY_MC */
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
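For architectures that do not select CONFIG_ARCH_HAS_COPY_MC, the block closed by the #endif above compiles out; the expectation, which is an assumption about include/linux/uio.h and is not part of this diff, is that the header maps the mc variant back to the plain copy so callers build either way:

/* sketch of the assumed include/linux/uio.h fallback, not shown in this diff */
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif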
@@ -1450,15 +1448,14 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
-               int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
-                                              v.iov_len, 0, &err);
-               if (!err) {
+                                              v.iov_len);
+               if (next) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
-               err ? v.iov_len : 0;
+               next ? 0 : v.iov_len;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
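On the csum_and_copy_from_iter() hunk above: the final expression of each iterate_and_advance() step evaluates to the number of bytes left uncopied, so `next ? 0 : v.iov_len` reports the whole segment as uncopied when the user copy faults. That works because csum_and_copy_from_user() now returns 0 only on a fault; the checksum arithmetic is assumed (per the companion csum changes, not visible in this hunk) to be seeded with all-ones so a successful copy can never yield 0. The userspace toy below illustrates that property empirically; fold() is an ad hoc helper, not the kernel's csum code.

/*
 * Userspace toy, not the kernel's csum code: an end-around-carry sum
 * seeded with all-ones never folds down to 0, which is what lets a
 * zero return stand for "the copy faulted".
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold(uint32_t s)
{
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return s;
}

int main(void)
{
	uint32_t sum = 0xffff;		/* all-ones seed */
	uint32_t lcg = 12345;		/* reproducible pseudo-random stream */
	int i, zeros = 0;

	for (i = 0; i < 1000000; i++) {
		lcg = lcg * 1103515245u + 12345u;
		sum = fold(sum + (lcg & 0xffff));
		zeros += (sum == 0);
	}
	printf("zero sums observed: %d (expected 0)\n", zeros);
	return 0;
}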
@@ -1492,11 +1489,10 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
-               int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
-                                              v.iov_len, 0, &err);
-               if (err)
+                                              v.iov_len);
+               if (!next)
                        return false;
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
@@ -1538,15 +1534,14 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
-               int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
-                                            v.iov_len, 0, &err);
-               if (!err) {
+                                            v.iov_len);
+               if (next) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
-               err ? v.iov_len : 0;
+               next ? 0 : v.iov_len;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                sum = csum_and_memcpy(p + v.bv_offset,