dio->submit.cookie = submit_bio(bio);
}
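
The blk_qc_t cookie returned by submit_bio() is what makes synchronous polled I/O possible: a sync IOCB_HIPRI caller saves it and later spins on the queue instead of sleeping. A sketch of how the cookie is typically consumed in the wait loop, assuming the iomap_dio submit fields of this era (last_queue, cookie) and the pre-bio_poll() blk_poll() interface:

	/* Poll the last submitted bio when we can, otherwise sleep. */
	if (!(iocb->ki_flags & IOCB_HIPRI) ||
	    !dio->submit.last_queue ||
	    !blk_poll(dio->submit.last_queue, dio->submit.cookie, true))
		io_schedule();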
-static ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
const struct iomap_dio_ops *dops = dio->dops;
struct kiocb *iocb = dio->iocb;
* ->end_io() when necessary, otherwise a racing buffered read would cache
* zeros from unwritten extents.
*/
- if (!dio->error &&
+ if (!dio->error && dio->size &&
(dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
int err;
err = invalidate_inode_pages2_range(inode->i_mapping,
dio_warn_stale_pagecache(iocb->ki_filp);
}
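+	/*
+	 * End the inode dio count before the sync below, matching the
+	 * ordering in fs/direct-io.c: a task blocked in inode_dio_wait()
+	 * may hold locks that generic_write_sync() needs.
+	 */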
+ inode_dio_end(file_inode(iocb->ki_filp));
/*
* If this is a DSYNC write, make sure we push it to stable storage now
* that we've written data.
if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
ret = generic_write_sync(iocb, ret);
- inode_dio_end(file_inode(iocb->ki_filp));
kfree(dio);
return ret;
}
+EXPORT_SYMBOL_GPL(iomap_dio_complete);
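
The ordering the comment above insists on (invalidate only after ->end_io()) is what the iomap_dio_ops hook exists for on the write side. A hypothetical write ->end_io(), loosely modelled on the XFS one; myfs_convert_unwritten() is an assumed filesystem helper, not a real kernel API:

	static int myfs_dio_write_end_io(struct kiocb *iocb, ssize_t size,
					 int error, unsigned flags)
	{
		struct inode *inode = file_inode(iocb->ki_filp);

		if (error)
			return error;
		if (!size)
			return 0;

		/*
		 * Convert unwritten extents to real allocations before
		 * iomap_dio_complete() invalidates the pagecache, so a
		 * racing buffered read cannot cache zeros over this range.
		 * Note that iocb->ki_pos has not been advanced yet here.
		 */
		if (flags & IOMAP_DIO_UNWRITTEN)
			return myfs_convert_unwritten(inode, iocb->ki_pos, size);
		return 0;
	}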
static void iomap_dio_complete_work(struct work_struct *work)
{
return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
case IOMAP_INLINE:
return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
+ case IOMAP_DELALLOC:
+ /*
+ * DIO is not serialised against mmap() access at all, so if a
+ * page_mkwrite occurs between the writeback and the iomap_apply()
+ * call in the DIO path, it will see the DELALLOC block that
+ * page_mkwrite allocated.
+ */
+ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
+ dio->iocb->ki_filp, current->comm);
+ return -EIO;
default:
WARN_ON_ONCE(1);
return -EIO;
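
A DELALLOC mapping should normally never reach the actor: the submission path flushes and invalidates the range first, and only a page_mkwrite racing into the window afterwards can reintroduce a delalloc block, hence the loud warning rather than a silent write. A condensed (not verbatim) sketch of that pre-submission step near the top of __iomap_dio_rw():

	/* Flush any dirty, possibly delalloc, pagecache over the range. */
	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		goto out_free_dio;

	/*
	 * For writes, also toss the now-clean pages; if that fails, return
	 * -ENOTBLK so the caller retries the write through the pagecache.
	 */
	if (iov_iter_rw(iter) == WRITE &&
	    invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					  end >> PAGE_SHIFT)) {
		ret = -ENOTBLK;
		goto out_free_dio;
	}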
* Returns -ENOTBLK in case of a page invalidation failure for writes.
* The caller needs to fall back to buffered I/O in this case.
*/
-ssize_t
-iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+struct iomap_dio *
+__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
bool wait_for_completion)
{
struct iomap_dio *dio;
if (!count)
- return 0;
+ return NULL;
if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
- return -EIO;
+ return ERR_PTR(-EIO);
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
if (!dio)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
dio->iocb = iocb;
atomic_set(&dio->ref, 1);
dio->wait_for_completion = wait_for_completion;
if (!atomic_dec_and_test(&dio->ref)) {
if (!wait_for_completion)
- return -EIOCBQUEUED;
+ return ERR_PTR(-EIOCBQUEUED);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
__set_current_state(TASK_RUNNING);
}
- return iomap_dio_complete(dio);
+ return dio;
out_free_dio:
kfree(dio);
- return ret;
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__iomap_dio_rw);
+
+ssize_t
+iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ bool wait_for_completion)
+{
+ struct iomap_dio *dio;
+
+ dio = __iomap_dio_rw(iocb, iter, ops, dops, wait_for_completion);
+ if (IS_ERR_OR_NULL(dio))
+ return PTR_ERR_OR_ZERO(dio);
+ return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
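
What the split buys a filesystem is control over locking across submission and completion. A hypothetical caller (all myfs_* names are assumptions for illustration) that submits under its inode lock, completes without it, and honours the -ENOTBLK buffered fallback described above:

	static ssize_t myfs_dio_write(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		struct iomap_dio *dio;
		ssize_t ret;

		inode_lock(inode);
		dio = __iomap_dio_rw(iocb, from, &myfs_iomap_ops,
				     &myfs_dio_write_ops, is_sync_kiocb(iocb));
		/*
		 * Submission is done (or the iocb is queued), so completion
		 * no longer needs i_rwsem -- the point of the split API.
		 */
		inode_unlock(inode);

		if (IS_ERR_OR_NULL(dio))
			ret = PTR_ERR_OR_ZERO(dio);	/* NULL: zero-byte I/O */
		else
			ret = iomap_dio_complete(dio);

		/* Pagecache invalidation failed: redo the write buffered. */
		if (ret == -ENOTBLK)
			ret = myfs_buffered_write(iocb, from);
		return ret;
	}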