static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
loff_t pos, size_t count, fl_owner_t owner)
{
- struct file *file = io->file;
+ struct file *file = io->iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
static int fuse_do_readpage(struct file *file, struct page *page)
{
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ struct kiocb iocb;
+ struct fuse_io_priv io;
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
req->num_pages = 1;
req->pages[0] = page;
req->page_descs[0].length = count;
+ init_sync_kiocb(&iocb, file);
+ io = (struct fuse_io_priv) FUSE_IO_PRIV_SYNC(&iocb);
num_read = fuse_send_read(req, &io, pos, count, NULL);
err = req->out.h.error;
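
Note (not part of the patch): fuse_do_readpage() only has a struct file, so a synchronous kiocb is synthesized on the stack before building the fuse_io_priv. For reference, a rough sketch of the helper it relies on, paraphrased from include/linux/fs.h of that era (exact body may differ):

/*
 * init_sync_kiocb() points the kiocb back at the file and derives its
 * IOCB_* flags from the file's open flags, which is all fuse_send_read()
 * needs once it reaches the file through io->iocb->ki_filp.
 */
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
		.ki_filp = filp,
		.ki_flags = iocb_flags(filp),
	};
}
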
static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
loff_t pos, size_t count, fl_owner_t owner)
{
- struct file *file = io->file;
+ struct kiocb *iocb = io->iocb;
+ struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_write_in *inarg = &req->misc.write.in;
fuse_write_fill(req, ff, pos, count);
inarg->flags = file->f_flags;
+ if (iocb->ki_flags & IOCB_DSYNC)
+ inarg->flags |= O_DSYNC;
+ if (iocb->ki_flags & IOCB_SYNC)
+ inarg->flags |= O_SYNC;
if (owner != NULL) {
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
return ret;
}
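
Note (not part of the patch): the O_DSYNC/O_SYNC bits are folded back into inarg->flags because the FUSE server only sees the open flags carried in the write request, while sync intent may now arrive purely as kiocb flags. A hypothetical userspace trigger for that case, shown only as an illustration, is a per-write sync request via pwritev2(2):

/*
 * Hypothetical illustration: ask for data-sync semantics on a single write
 * without opening the file O_DSYNC.  The kernel expresses this as
 * IOCB_DSYNC on the kiocb, which the hunk above maps back to O_DSYNC for
 * the FUSE server.
 */
#define _GNU_SOURCE
#include <sys/uio.h>

static ssize_t write_one_dsync(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, off, RWF_DSYNC);
}
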
-static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
struct inode *inode, loff_t pos,
size_t count)
{
size_t res;
unsigned offset;
unsigned i;
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
for (i = 0; i < req->num_pages; i++)
fuse_wait_on_page_writeback(inode, req->pages[i]->index);
FUSE_MAX_PAGES_PER_REQ);
}
-static ssize_t fuse_perform_write(struct file *file,
+static ssize_t fuse_perform_write(struct kiocb *iocb,
struct address_space *mapping,
struct iov_iter *ii, loff_t pos)
{
} else {
size_t num_written;
- num_written = fuse_send_write_pages(req, file, inode,
+ num_written = fuse_send_write_pages(req, iocb, inode,
pos, count);
err = req->out.h.error;
if (!err) {
pos += written;
- written_buffered = fuse_perform_write(file, mapping, from, pos);
+ written_buffered = fuse_perform_write(iocb, mapping, from, pos);
if (written_buffered < 0) {
err = written_buffered;
goto out;
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
} else {
- written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
+ written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
if (written >= 0)
iocb->ki_pos += written;
}
out:
current->backing_dev_info = NULL;
inode_unlock(inode);
+ if (written > 0)
+ written = generic_write_sync(iocb, written);
return written ? written : err;
}
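
Note (not part of the patch): with the inode lock dropped, the write path now finishes through generic_write_sync(), so O_SYNC/O_DSYNC opens and per-kiocb sync writes get their range flushed before returning. Roughly, the generic helper does the following (paraphrased from include/linux/fs.h; details may differ):

static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
	/* Flush the just-written range if the kiocb asks for data integrity. */
	if (iocb->ki_flags & IOCB_DSYNC) {
		int ret = vfs_fsync_range(iocb->ki_filp,
					  iocb->ki_pos - count,
					  iocb->ki_pos - 1,
					  (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
		if (ret)
			return ret;
	}
	return count;
}
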
{
int write = flags & FUSE_DIO_WRITE;
int cuse = flags & FUSE_DIO_CUSE;
- struct file *file = io->file;
+ struct file *file = io->iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
loff_t *ppos)
{
ssize_t res;
- struct file *file = io->file;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(io->iocb->ki_filp);
if (is_bad_inode(inode))
return -EIO;
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
return __fuse_direct_read(&io, to, &iocb->ki_pos);
}
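
Note (not part of the patch): both direct-I/O iter entry points now build the synchronous fuse_io_priv straight from the kiocb. The counterpart fs/fuse/fuse_i.h hunk is not shown in this excerpt; assuming the existing initializer simply swaps its .file member for .iocb, it would look roughly like:

/* Sketch of the assumed fuse_i.h counterpart (field layout assumed, not taken from this excerpt). */
#define FUSE_IO_PRIV_SYNC(i)		\
{					\
	.refcnt = { ATOMIC_INIT(1) },	\
	.async = 0,			\
	.iocb = i,			\
}
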
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
ssize_t res;
if (is_bad_inode(inode))
io->offset = offset;
io->write = (iov_iter_rw(iter) == WRITE);
io->err = 0;
- io->file = file;
/*
* By default, we want to optimize all I/Os with async request
* submission to the client filesystem if supported.