diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 437fbc14c972..14cae2584db4 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/splice.h>
+#include <linux/compat.h>
 #include <net/checksum.h>
 #include <linux/scatterlist.h>
 #include <linux/instrumented.h>
@@ -637,30 +638,30 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static int copyout_mc(void __user *to, const void *from, size_t n)
 {
 	if (access_ok(to, n)) {
 		instrument_copy_to_user(to, from, n);
-		n = copy_to_user_mcsafe((__force void *) to, from, n);
+		n = copy_mc_to_user((__force void *) to, from, n);
 	}
 	return n;
 }
 
-static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+static unsigned long copy_mc_to_page(struct page *page, size_t offset,
 		const char *from, size_t len)
 {
 	unsigned long ret;
 	char *to;
 
 	to = kmap_atomic(page);
-	ret = memcpy_mcsafe(to + offset, from, len);
+	ret = copy_mc_to_kernel(to + offset, from, len);
 	kunmap_atomic(to);
 
 	return ret;
 }
 
-static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
 				struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
@@ -678,7 +679,7 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 		unsigned long rem;
 
-		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
+		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
 				off, addr, chunk);
 		i->head = i_head;
 		i->iov_offset = off + chunk - rem;
@@ -695,18 +696,17 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
 }
 
 /**
- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * _copy_mc_to_iter - copy to iter with source memory error exception handling
  * @addr: source kernel address
  * @bytes: total transfer length
  * @iter: destination iterator
  *
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_to_iter() for protecting read/write to persistent memory.
- * Unless / until an architecture can guarantee identical performance
- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
- * performance regression to switch more users to the mcsafe version.
+ * The pmem driver deploys this for the dax operation
+ * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
+ * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
+ * successfully copied.
  *
- * Otherwise, the main differences between this and typical _copy_to_iter().
+ * The main differences between this and typical _copy_to_iter().
  *
  * * Typical tail/residue handling after a fault retries the copy
  *   byte-by-byte until the fault happens again. Re-triggering machine
@@ -717,23 +717,22 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
  * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
  *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
  *   a short copy.
- *
- * See MCSAFE_TEST for self-test.
 */
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
	if (unlikely(iov_iter_is_pipe(i)))
-		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
+		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
-		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
+			   v.iov_len),
		({
-		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
-				(from += v.bv_len) - v.bv_len, v.bv_len);
+		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
+				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
@@ -741,8 +740,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
		}
	}),
	({
-		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
-				v.iov_len);
+		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
+					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
@@ -753,8 +752,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 
	return bytes;
 }
-EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
-#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
+#endif /* CONFIG_ARCH_HAS_COPY_MC */
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
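
The short-copy accounting in _copy_mc_to_iter() is easy to miss inside the iterate_and_advance() macro. The standalone userspace sketch below models it: copy_mc_to_kernel() is stubbed with a hypothetical poison offset (the real primitive needs machine-check capable hardware and is not reimplemented here), and the curr_addr - s_addr - rem arithmetic recovers how many bytes landed before the fault.

#include <stdio.h>
#include <string.h>

/*
 * Stub standing in for the kernel's copy_mc_to_kernel(): pretend the
 * source becomes poisoned 10 bytes in. Like the real primitive, it
 * returns the number of bytes NOT copied (the residue).
 */
static unsigned long copy_mc_to_kernel(void *dst, const void *src, size_t len)
{
	size_t poison_off = 10;	/* hypothetical #MC location */
	size_t done = len < poison_off ? len : poison_off;

	memcpy(dst, src, done);
	return len - done;
}

int main(void)
{
	char src[32] = "data with a poisoned cache line";
	char dst[32];
	const char *from = src;
	unsigned long rem, curr_addr, s_addr = (unsigned long)src;
	size_t bytes = sizeof(src);
	size_t seg = sizeof(src);	/* one segment of the "iterator" */

	rem = copy_mc_to_kernel(dst, (from += seg) - seg, seg);
	if (rem) {
		curr_addr = (unsigned long)from;
		bytes = curr_addr - s_addr - rem;	/* bytes that landed */
	}
	printf("copied %zu of %zu bytes\n", bytes, sizeof(src));
	return 0;
}

With a 32-byte source and the stubbed fault 10 bytes in, this prints "copied 10 of 32 bytes" -- exactly the truncated count that read(2) then propagates to the caller instead of retrying into the poison.
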
@@ -1449,15 +1448,14 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
-		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
-					       v.iov_len, ~0U, &err);
-		if (!err) {
+					       v.iov_len);
+		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
-		err ? v.iov_len : 0;
+		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
@@ -1491,11 +1489,10 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
-		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
-					       v.iov_len, ~0U, &err);
-		if (err)
+					       v.iov_len);
+		if (!next)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
@@ -1537,15 +1534,14 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
-		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
-					     v.iov_len, ~0U, &err);
-		if (!err) {
+					     v.iov_len);
+		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
-		err ? v.iov_len : 0;
+		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
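
These hunks switch csum_and_copy_{from,to}_user() from an out-parameter error flag to in-band signalling: the functions now return the checksum directly and 0 on a fault, which the kernel makes unambiguous by seeding the internal sum with the ~0U that callers previously passed in (a ones-complement sum accumulated from that seed does not come out as 0). The surviving csum_block_add(sum, next, off) calls then fold each segment's checksum into the running total at its byte offset. The self-contained model below (csum_add()/csum_block_add()/csum_fold() reimplemented for illustration, not the kernel's code) shows why an odd offset needs an 8-bit rotation: the Internet checksum is computed over 16-bit words, so a block starting on an odd byte has its byte lanes swapped.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 32-bit ones-complement add with end-around carry, like csum_add(). */
static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint32_t s = a + b;

	return s + (s < a);	/* fold the carry back in */
}

/* Accumulate 16-bit little-endian words, like csum_partial(). */
static uint32_t csum_partial32(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum = csum_add32(sum, (uint32_t)p[i] | ((uint32_t)p[i + 1] << 8));
	if (len & 1)
		sum = csum_add32(sum, p[len - 1]);	/* trailing odd byte */
	return sum;
}

/* Fold a sub-checksum in at byte offset 'off', like csum_block_add():
 * an odd offset rotates the sub-sum by 8 bits to swap the byte lanes. */
static uint32_t csum_block_add32(uint32_t sum, uint32_t part, size_t off)
{
	if (off & 1)
		part = (part >> 8) | (part << 24);	/* ror32(part, 8) */
	return csum_add32(sum, part);
}

/* Reduce to the final 16-bit checksum, like csum_fold(). */
static uint16_t csum_fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t data[7] = { 1, 2, 3, 4, 5, 6, 7 };
	uint32_t whole = csum_partial32(data, 7, 0);
	uint32_t head  = csum_partial32(data, 3, 0);	  /* bytes 0..2 */
	uint32_t tail  = csum_partial32(data + 3, 4, 0);  /* bytes 3..6 */
	uint32_t split = csum_block_add32(head, tail, 3); /* odd offset */

	printf("one pass: %#06x, combined: %#06x\n",
	       csum_fold16(whole), csum_fold16(split));
	return 0;
}

Run, this prints the same folded value (0xf3ef) for both the one-pass and the combined computation, confirming the offset rule that lets the iterator checksum each segment independently.
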
@@ -1650,16 +1646,145 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 }
 EXPORT_SYMBOL(dup_iter);
 
+static int copy_compat_iovec_from_user(struct iovec *iov,
+		const struct iovec __user *uvec, unsigned long nr_segs)
+{
+	const struct compat_iovec __user *uiov =
+		(const struct compat_iovec __user *)uvec;
+	int ret = -EFAULT, i;
+
+	if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+		return -EFAULT;
+
+	for (i = 0; i < nr_segs; i++) {
+		compat_uptr_t buf;
+		compat_ssize_t len;
+
+		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
+		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
+
+		/* check for compat_size_t not fitting in compat_ssize_t .. */
+		if (len < 0) {
+			ret = -EINVAL;
+			goto uaccess_end;
+		}
+		iov[i].iov_base = compat_ptr(buf);
+		iov[i].iov_len = len;
+	}
+
+	ret = 0;
+uaccess_end:
+	user_access_end();
+	return ret;
+}
+
+static int copy_iovec_from_user(struct iovec *iov,
+		const struct iovec __user *uvec, unsigned long nr_segs)
+{
+	unsigned long seg;
+
+	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
+		return -EFAULT;
+	for (seg = 0; seg < nr_segs; seg++) {
+		if ((ssize_t)iov[seg].iov_len < 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+struct iovec *iovec_from_user(const struct iovec __user *uvec,
+		unsigned long nr_segs, unsigned long fast_segs,
+		struct iovec *fast_iov, bool compat)
+{
+	struct iovec *iov = fast_iov;
+	int ret;
+
+	/*
+	 * SuS says "The readv() function *may* fail if the iovcnt argument was
+	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
+	 * traditionally returned zero for zero segments, so...
+	 */
+	if (nr_segs == 0)
+		return iov;
+	if (nr_segs > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+	if (nr_segs > fast_segs) {
+		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
+		if (!iov)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	if (compat)
+		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
+	else
+		ret = copy_iovec_from_user(iov, uvec, nr_segs);
+	if (ret) {
+		if (iov != fast_iov)
+			kfree(iov);
+		return ERR_PTR(ret);
+	}
+
+	return iov;
+}
+
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+		 struct iov_iter *i, bool compat)
+{
+	ssize_t total_len = 0;
+	unsigned long seg;
+	struct iovec *iov;
+
+	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
+	if (IS_ERR(iov)) {
+		*iovp = NULL;
+		return PTR_ERR(iov);
+	}
+
+	/*
+	 * According to the Single Unix Specification we should return EINVAL if
+	 * an element length is < 0 when cast to ssize_t or if the total length
+	 * would overflow the ssize_t return value of the system call.
+	 *
+	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
+	 * overflow case.
+	 */
+	for (seg = 0; seg < nr_segs; seg++) {
+		ssize_t len = (ssize_t)iov[seg].iov_len;
+
+		if (!access_ok(iov[seg].iov_base, len)) {
+			if (iov != *iovp)
+				kfree(iov);
+			*iovp = NULL;
+			return -EFAULT;
+		}
+
+		if (len > MAX_RW_COUNT - total_len) {
+			len = MAX_RW_COUNT - total_len;
+			iov[seg].iov_len = len;
+		}
+		total_len += len;
+	}
+
+	iov_iter_init(i, type, iov, nr_segs, total_len);
+	if (iov == *iovp)
+		*iovp = NULL;
+	else
+		*iovp = iov;
+	return total_len;
+}
+
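
Two details in the block above deserve a note. First, copy_compat_iovec_from_user() opens the uaccess window with nr_segs * sizeof(*uvec), the native iovec size, which over-covers the 32-bit array; a later mainline fix narrowed this to the compat size, nr_segs * sizeof(*uiov). Second, __import_iovec() deliberately does not fail on an oversized total: per the comment, it clamps segments so the running total never exceeds MAX_RW_COUNT. The standalone model below isolates that clamping loop (access_ok() is dropped, negative lengths are assumed already rejected at copy-in, and MAX_RW_COUNT's value assumes the usual INT_MAX & PAGE_MASK definition):

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

/* As in the kernel: INT_MAX rounded down to a page boundary. */
#define MAX_RW_COUNT	((ssize_t)0x7ffff000)

struct iovec_model {
	void	*iov_base;
	size_t	 iov_len;
};

/* Model of the clamping loop in __import_iovec(): truncate segments so
 * the running total never exceeds MAX_RW_COUNT. */
static ssize_t total_len_clamped(struct iovec_model *iov, unsigned long nr_segs)
{
	ssize_t total_len = 0;
	unsigned long seg;

	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;	/* shorten in place */
		}
		total_len += len;
	}
	return total_len;
}

int main(void)
{
	struct iovec_model iov[2] = {
		{ NULL, 0x7fffe000 },	/* almost the whole budget */
		{ NULL, 0x10000 },	/* would overshoot the cap  */
	};
	ssize_t n = total_len_clamped(iov, 2);

	printf("total %zd, second segment clamped to %zu bytes\n",
	       n, iov[1].iov_len);
	return 0;
}

Here the second segment would push the total past the cap, so it is quietly truncated to 4096 bytes and the total comes out at MAX_RW_COUNT (2147479552), mirroring how an oversized readv() is shortened rather than rejected.
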
 /**
  * import_iovec() - Copy an array of &struct iovec from userspace
  *     into the kernel, check that it is valid, and initialize a new
  *     &struct iov_iter iterator to access it.
  *
  * @type: One of %READ or %WRITE.
- * @uvector: Pointer to the userspace array.
+ * @uvec: Pointer to the userspace array.
  * @nr_segs: Number of elements in userspace array.
  * @fast_segs: Number of elements in @iov.
- * @iov: (input and output parameter) Pointer to pointer to (usually small
+ * @iovp: (input and output parameter) Pointer to pointer to (usually small
  *     on-stack) kernel array.
  * @i: Pointer to iterator that will be initialized on success.
  *
@@ -1672,51 +1797,15 @@ EXPORT_SYMBOL(dup_iter);
  *
  * Return: Negative error code on error, bytes imported on success
  */
-ssize_t import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
		unsigned nr_segs, unsigned fast_segs,
-		struct iovec **iov, struct iov_iter *i)
+		struct iovec **iovp, struct iov_iter *i)
 {
-	ssize_t n;
-	struct iovec *p;
-	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
-				  *iov, &p);
-	if (n < 0) {
-		if (p != *iov)
-			kfree(p);
-		*iov = NULL;
-		return n;
-	}
-	iov_iter_init(i, type, p, nr_segs, n);
-	*iov = p == *iov ? NULL : p;
-	return n;
+	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
+			      in_compat_syscall());
 }
 EXPORT_SYMBOL(import_iovec);
 
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
-ssize_t compat_import_iovec(int type,
-		const struct compat_iovec __user * uvector,
-		unsigned nr_segs, unsigned fast_segs,
-		struct iovec **iov, struct iov_iter *i)
-{
-	ssize_t n;
-	struct iovec *p;
-	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
-				  *iov, &p);
-	if (n < 0) {
-		if (p != *iov)
-			kfree(p);
-		*iov = NULL;
-		return n;
-	}
-	iov_iter_init(i, type, p, nr_segs, n);
-	*iov = p == *iov ? NULL : p;
-	return n;
-}
-EXPORT_SYMBOL(compat_import_iovec);
-#endif
-
 int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
 {
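
The rewritten import_iovec() keeps the contract its callers rely on: on return, *iovp is NULL when the caller's small on-stack array was used and points at the kmalloc'd array otherwise, so cleanup is always an unconditional kfree(*iovp). A userspace model of that contract (illustrative names, not the kernel API; free(NULL) is a no-op just as kfree(NULL) is):

#include <stdio.h>
#include <stdlib.h>

#define UIO_FASTIOV	8

struct iovec_model {
	void	*iov_base;
	size_t	 iov_len;
};

/* Model of import_iovec()'s ownership contract: *iovp is NULLed when
 * the caller's fast (on-stack) array was used, and points at the heap
 * allocation otherwise, so cleanup is an unconditional free(*iovp). */
static int import_model(unsigned long nr_segs, unsigned long fast_segs,
			struct iovec_model **iovp)
{
	struct iovec_model *iov = *iovp;	/* caller's fast array */

	if (nr_segs > fast_segs) {
		iov = calloc(nr_segs, sizeof(*iov));
		if (!iov)
			return -12;		/* -ENOMEM */
	}
	/* ... copy-in and validation would happen here ... */
	*iovp = (iov == *iovp) ? NULL : iov;
	return 0;
}

int main(void)
{
	struct iovec_model stack[UIO_FASTIOV], *iov;

	iov = stack;
	if (!import_model(4, UIO_FASTIOV, &iov))
		free(iov);	/* small request: iov is NULL, free is a no-op */

	iov = stack;
	if (!import_model(32, UIO_FASTIOV, &iov))
		free(iov);	/* large request: iov owns the heap array */

	return 0;
}

Because import_iovec() now routes 32-bit callers through the same path via in_compat_syscall(), the separate compat_import_iovec() and its duplicated cleanup logic could be deleted from this file outright.
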