From: Linus Torvalds
Date: Wed, 3 Aug 2022 20:50:22 +0000 (-0700)
Subject: Merge tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Tag: microblaze-v6.1~154
X-Git-Url: http://git.monstr.eu/?p=linux-2.6-microblaze.git;a=commitdiff_plain;h=5264406cdb66c7003eb3edf53c9773b1b20611b9

Merge tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs iov_iter updates from Al Viro:
 "Part 1 - isolated cleanups and optimizations.

  One of the goals is to reduce the overhead of using ->read_iter() and
  ->write_iter() instead of ->read()/->write(). new_sync_{read,write}()
  has a surprising amount of overhead, in particular inside
  iocb_flags(). That's why the beginning of the series is in this pile;
  it's not directly iov_iter-related, but it's a part of the same
  work..."

* tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  first_iovec_segment(): just return address
  iov_iter: massage calling conventions for first_{iovec,bvec}_segment()
  iov_iter: first_{iovec,bvec}_segment() - simplify a bit
  iov_iter: lift dealing with maxpages out of first_{iovec,bvec}_segment()
  iov_iter_get_pages{,_alloc}(): cap the maxsize with MAX_RW_COUNT
  iov_iter_bvec_advance(): don't bother with bvec_iter
  copy_page_{to,from}_iter(): switch iovec variants to generic
  keep iocb_flags() result cached in struct file
  iocb: delay evaluation of IS_SYNC(...) until we want to check IOCB_DSYNC
  struct file: use anonymous union member for rcuhead and llist
  btrfs: use IOMAP_DIO_NOSYNC
  teach iomap_dio_rw() to suppress dsync
  No need of likely/unlikely on calls of check_copy_size()
---

5264406cdb66c7003eb3edf53c9773b1b20611b9
diff --cc block/fops.c
index 92ee820e40fd,6e86931ab847..a564cd81340c
--- a/block/fops.c
+++ b/block/fops.c
@@@ -32,21 -32,14 +32,21 @@@ static int blkdev_get_block(struct inod
 	return 0;
 }
 
-static unsigned int dio_bio_write_op(struct kiocb *iocb)
+static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
 {
-	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
 
 	/* avoid the need for a I/O completion work item */
-	if (iocb->ki_flags & IOCB_DSYNC)
+	if (iocb_is_dsync(iocb))
-		op |= REQ_FUA;
-	return op;
+		opf |= REQ_FUA;
+	return opf;
+}
+
+static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
+		struct iov_iter *iter)
+{
+	return pos & (bdev_logical_block_size(bdev) - 1) ||
+		!bdev_iter_is_aligned(bdev, iter);
 }
 
 #define DIO_INLINE_BIO_VECS 4
diff --cc include/linux/fs.h
index 76f09f3ee4a5,c82b9d442f56..cc64873d76c5
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@@ -2324,12 -2196,10 +2325,10 @@@ static inline bool sb_rdonly(const stru
 static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
 				   struct inode *inode)
 {
-	return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
-	       !gid_valid(i_gid_into_mnt(mnt_userns, inode));
+	return !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)) ||
+	       !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode));
 }
 
- static inline int iocb_flags(struct file *file);
-
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
 	*kiocb = (struct kiocb) {
diff --cc io_uring/rw.c
index 2b784795103c,000000000000..b20ba87e4926
mode 100644,000000..100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@@ -1,1049 -1,0 +1,1049 @@@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "io_uring.h"
+#include "opdef.h" +#include "kbuf.h" +#include "rsrc.h" +#include "rw.h" + +struct io_rw { + /* NOTE: kiocb has the file as the first member, so don't do it here */ + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +static inline bool io_file_supports_nowait(struct io_kiocb *req) +{ + return req->flags & REQ_F_SUPPORT_NOWAIT; +} + +int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + unsigned ioprio; + int ret; + + rw->kiocb.ki_pos = READ_ONCE(sqe->off); + /* used for fixed read/write too - just read unconditionally */ + req->buf_index = READ_ONCE(sqe->buf_index); + + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) { + struct io_ring_ctx *ctx = req->ctx; + u16 index; + + if (unlikely(req->buf_index >= ctx->nr_user_bufs)) + return -EFAULT; + index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); + req->imu = ctx->user_bufs[index]; + io_req_set_rsrc_node(req, ctx, 0); + } + + ioprio = READ_ONCE(sqe->ioprio); + if (ioprio) { + ret = ioprio_check_cap(ioprio); + if (ret) + return ret; + + rw->kiocb.ki_ioprio = ioprio; + } else { + rw->kiocb.ki_ioprio = get_current_ioprio(); + } + + rw->addr = READ_ONCE(sqe->addr); + rw->len = READ_ONCE(sqe->len); + rw->flags = READ_ONCE(sqe->rw_flags); + return 0; +} + +void io_readv_writev_cleanup(struct io_kiocb *req) +{ + struct io_async_rw *io = req->async_data; + + kfree(io->free_iovec); +} + +static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) +{ + switch (ret) { + case -EIOCBQUEUED: + break; + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + /* + * We can't just restart the syscall, since previously + * submitted sqes may already be in progress. Just fail this + * IO with EINTR. + */ + ret = -EINTR; + fallthrough; + default: + kiocb->ki_complete(kiocb, ret); + } +} + +static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + + if (rw->kiocb.ki_pos != -1) + return &rw->kiocb.ki_pos; + + if (!(req->file->f_mode & FMODE_STREAM)) { + req->flags |= REQ_F_CUR_POS; + rw->kiocb.ki_pos = req->file->f_pos; + return &rw->kiocb.ki_pos; + } + + rw->kiocb.ki_pos = 0; + return NULL; +} + +static void io_req_task_queue_reissue(struct io_kiocb *req) +{ + req->io_task_work.func = io_queue_iowq; + io_req_task_work_add(req); +} + +#ifdef CONFIG_BLOCK +static bool io_resubmit_prep(struct io_kiocb *req) +{ + struct io_async_rw *io = req->async_data; + + if (!req_has_async_data(req)) + return !io_req_prep_async(req); + iov_iter_restore(&io->s.iter, &io->s.iter_state); + return true; +} + +static bool io_rw_should_reissue(struct io_kiocb *req) +{ + umode_t mode = file_inode(req->file)->i_mode; + struct io_ring_ctx *ctx = req->ctx; + + if (!S_ISBLK(mode) && !S_ISREG(mode)) + return false; + if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && + !(ctx->flags & IORING_SETUP_IOPOLL))) + return false; + /* + * If ref is dying, we might be running poll reap from the exit work. + * Don't attempt to reissue from that path, just let it fail with + * -EAGAIN. + */ + if (percpu_ref_is_dying(&ctx->refs)) + return false; + /* + * Play it safe and assume not safe to re-import and reissue if we're + * not in the original thread group (or in task context). 
+ */ + if (!same_thread_group(req->task, current) || !in_task()) + return false; + return true; +} +#else +static bool io_resubmit_prep(struct io_kiocb *req) +{ + return false; +} +static bool io_rw_should_reissue(struct io_kiocb *req) +{ + return false; +} +#endif + +static void kiocb_end_write(struct io_kiocb *req) +{ + /* + * Tell lockdep we inherited freeze protection from submission + * thread. + */ + if (req->flags & REQ_F_ISREG) { + struct super_block *sb = file_inode(req->file)->i_sb; + + __sb_writers_acquired(sb, SB_FREEZE_WRITE); + sb_end_write(sb); + } +} + +static bool __io_complete_rw_common(struct io_kiocb *req, long res) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + + if (rw->kiocb.ki_flags & IOCB_WRITE) { + kiocb_end_write(req); + fsnotify_modify(req->file); + } else { + fsnotify_access(req->file); + } + if (unlikely(res != req->cqe.res)) { + if ((res == -EAGAIN || res == -EOPNOTSUPP) && + io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; + return true; + } + req_set_fail(req); + req->cqe.res = res; + } + return false; +} + +static void io_complete_rw(struct kiocb *kiocb, long res) +{ + struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); + struct io_kiocb *req = cmd_to_io_kiocb(rw); + + if (__io_complete_rw_common(req, res)) + return; + io_req_set_res(req, res, 0); + req->io_task_work.func = io_req_task_complete; + io_req_task_work_add(req); +} + +static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) +{ + struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); + struct io_kiocb *req = cmd_to_io_kiocb(rw); + + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); + if (unlikely(res != req->cqe.res)) { + if (res == -EAGAIN && io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; + return; + } + req->cqe.res = res; + } + + /* order with io_iopoll_complete() checking ->iopoll_completed */ + smp_store_release(&req->iopoll_completed, 1); +} + +static int kiocb_done(struct io_kiocb *req, ssize_t ret, + unsigned int issue_flags) +{ + struct io_async_rw *io = req->async_data; + struct io_rw *rw = io_kiocb_to_cmd(req); + + /* add previously done IO, if any */ + if (req_has_async_data(req) && io->bytes_done > 0) { + if (ret < 0) + ret = io->bytes_done; + else + ret += io->bytes_done; + } + + if (req->flags & REQ_F_CUR_POS) + req->file->f_pos = rw->kiocb.ki_pos; + if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) { + if (!__io_complete_rw_common(req, ret)) { + io_req_set_res(req, req->cqe.res, + io_put_kbuf(req, issue_flags)); + return IOU_OK; + } + } else { + io_rw_done(&rw->kiocb, ret); + } + + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + if (io_resubmit_prep(req)) + io_req_task_queue_reissue(req); + else + io_req_task_queue_fail(req, ret); + } + return IOU_ISSUE_SKIP_COMPLETE; +} + +#ifdef CONFIG_COMPAT +static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, + unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct compat_iovec __user *uiov; + compat_ssize_t clen; + void __user *buf; + size_t len; + + uiov = u64_to_user_ptr(rw->addr); + if (!access_ok(uiov, sizeof(*uiov))) + return -EFAULT; + if (__get_user(clen, &uiov->iov_len)) + return -EFAULT; + if (clen < 0) + return -EINVAL; + + len = clen; + buf = io_buffer_select(req, &len, issue_flags); + if (!buf) + return -ENOBUFS; + rw->addr = (unsigned long) buf; + iov[0].iov_base = buf; + rw->len = iov[0].iov_len = (compat_size_t) len; + return 0; +} +#endif + +static ssize_t 
__io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, + unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct iovec __user *uiov = u64_to_user_ptr(rw->addr); + void __user *buf; + ssize_t len; + + if (copy_from_user(iov, uiov, sizeof(*uiov))) + return -EFAULT; + + len = iov[0].iov_len; + if (len < 0) + return -EINVAL; + buf = io_buffer_select(req, &len, issue_flags); + if (!buf) + return -ENOBUFS; + rw->addr = (unsigned long) buf; + iov[0].iov_base = buf; + rw->len = iov[0].iov_len = len; + return 0; +} + +static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, + unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + + if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { + iov[0].iov_base = u64_to_user_ptr(rw->addr); + iov[0].iov_len = rw->len; + return 0; + } + if (rw->len != 1) + return -EINVAL; + +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + return io_compat_import(req, iov, issue_flags); +#endif + + return __io_iov_buffer_select(req, iov, issue_flags); +} + +static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, + struct io_rw_state *s, + unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct iov_iter *iter = &s->iter; + u8 opcode = req->opcode; + struct iovec *iovec; + void __user *buf; + size_t sqe_len; + ssize_t ret; + + if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { + ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + buf = u64_to_user_ptr(rw->addr); + sqe_len = rw->len; + + if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { + if (io_do_buffer_select(req)) { + buf = io_buffer_select(req, &sqe_len, issue_flags); + if (!buf) + return ERR_PTR(-ENOBUFS); + rw->addr = (unsigned long) buf; + rw->len = sqe_len; + } + + ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + iovec = s->fast_iov; + if (req->flags & REQ_F_BUFFER_SELECT) { + ret = io_iov_buffer_select(req, iovec, issue_flags); + if (ret) + return ERR_PTR(ret); + iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len); + return NULL; + } + + ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter, + req->ctx->compat); + if (unlikely(ret < 0)) + return ERR_PTR(ret); + return iovec; +} + +static inline int io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct io_rw_state *s, + unsigned int issue_flags) +{ + *iovec = __io_import_iovec(rw, req, s, issue_flags); + if (unlikely(IS_ERR(*iovec))) + return PTR_ERR(*iovec); + + iov_iter_save_state(&s->iter, &s->iter_state); + return 0; +} + +static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) +{ + return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; +} + +/* + * For files that don't have ->read_iter() and ->write_iter(), handle them + * by looping over ->read() or ->write() manually. + */ +static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter) +{ + struct kiocb *kiocb = &rw->kiocb; + struct file *file = kiocb->ki_filp; + ssize_t ret = 0; + loff_t *ppos; + + /* + * Don't support polled IO through this interface, and we can't + * support non-blocking either. For the latter, this just causes + * the kiocb to be handled from an async context. 
+ */ + if (kiocb->ki_flags & IOCB_HIPRI) + return -EOPNOTSUPP; + if ((kiocb->ki_flags & IOCB_NOWAIT) && + !(kiocb->ki_filp->f_flags & O_NONBLOCK)) + return -EAGAIN; + + ppos = io_kiocb_ppos(kiocb); + + while (iov_iter_count(iter)) { + struct iovec iovec; + ssize_t nr; + + if (!iov_iter_is_bvec(iter)) { + iovec = iov_iter_iovec(iter); + } else { + iovec.iov_base = u64_to_user_ptr(rw->addr); + iovec.iov_len = rw->len; + } + + if (ddir == READ) { + nr = file->f_op->read(file, iovec.iov_base, + iovec.iov_len, ppos); + } else { + nr = file->f_op->write(file, iovec.iov_base, + iovec.iov_len, ppos); + } + + if (nr < 0) { + if (!ret) + ret = nr; + break; + } + ret += nr; + if (!iov_iter_is_bvec(iter)) { + iov_iter_advance(iter, nr); + } else { + rw->addr += nr; + rw->len -= nr; + if (!rw->len) + break; + } + if (nr != iovec.iov_len) + break; + } + + return ret; +} + +static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, + const struct iovec *fast_iov, struct iov_iter *iter) +{ + struct io_async_rw *io = req->async_data; + + memcpy(&io->s.iter, iter, sizeof(*iter)); + io->free_iovec = iovec; + io->bytes_done = 0; + /* can only be fixed buffers, no need to do anything */ + if (iov_iter_is_bvec(iter)) + return; + if (!iovec) { + unsigned iov_off = 0; + + io->s.iter.iov = io->s.fast_iov; + if (iter->iov != fast_iov) { + iov_off = iter->iov - fast_iov; + io->s.iter.iov += iov_off; + } + if (io->s.fast_iov != fast_iov) + memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off, + sizeof(struct iovec) * iter->nr_segs); + } else { + req->flags |= REQ_F_NEED_CLEANUP; + } +} + +static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, + struct io_rw_state *s, bool force) +{ + if (!force && !io_op_defs[req->opcode].prep_async) + return 0; + if (!req_has_async_data(req)) { + struct io_async_rw *iorw; + + if (io_alloc_async_data(req)) { + kfree(iovec); + return -ENOMEM; + } + + io_req_map_rw(req, iovec, s->fast_iov, &s->iter); + iorw = req->async_data; + /* we've copied and mapped the iter, ensure state is saved */ + iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); + } + return 0; +} + +static inline int io_rw_prep_async(struct io_kiocb *req, int rw) +{ + struct io_async_rw *iorw = req->async_data; + struct iovec *iov; + int ret; + + /* submission path, ->uring_lock should already be taken */ + ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); + if (unlikely(ret < 0)) + return ret; + + iorw->bytes_done = 0; + iorw->free_iovec = iov; + if (iov) + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +int io_readv_prep_async(struct io_kiocb *req) +{ + return io_rw_prep_async(req, READ); +} + +int io_writev_prep_async(struct io_kiocb *req) +{ + return io_rw_prep_async(req, WRITE); +} + +/* + * This is our waitqueue callback handler, registered through __folio_lock_async() + * when we initially tried to do the IO with the iocb armed our waitqueue. + * This gets called when the page is unlocked, and we generally expect that to + * happen when the page IO is completed and the page is now uptodate. This will + * queue a task_work based retry of the operation, attempting to copy the data + * again. If the latter fails because the page was NOT uptodate, then we will + * do a thread based blocking retry of the operation. That's the unexpected + * slow path. 
+ */ +static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, + int sync, void *arg) +{ + struct wait_page_queue *wpq; + struct io_kiocb *req = wait->private; + struct io_rw *rw = io_kiocb_to_cmd(req); + struct wait_page_key *key = arg; + + wpq = container_of(wait, struct wait_page_queue, wait); + + if (!wake_page_match(wpq, key)) + return 0; + + rw->kiocb.ki_flags &= ~IOCB_WAITQ; + list_del_init(&wait->entry); + io_req_task_queue(req); + return 1; +} + +/* + * This controls whether a given IO request should be armed for async page + * based retry. If we return false here, the request is handed to the async + * worker threads for retry. If we're doing buffered reads on a regular file, + * we prepare a private wait_page_queue entry and retry the operation. This + * will either succeed because the page is now uptodate and unlocked, or it + * will register a callback when the page is unlocked at IO completion. Through + * that callback, io_uring uses task_work to setup a retry of the operation. + * That retry will attempt the buffered read again. The retry will generally + * succeed, or in rare cases where it fails, we then fall back to using the + * async worker threads for a blocking retry. + */ +static bool io_rw_should_retry(struct io_kiocb *req) +{ + struct io_async_rw *io = req->async_data; + struct wait_page_queue *wait = &io->wpq; + struct io_rw *rw = io_kiocb_to_cmd(req); + struct kiocb *kiocb = &rw->kiocb; + + /* never retry for NOWAIT, we just complete with -EAGAIN */ + if (req->flags & REQ_F_NOWAIT) + return false; + + /* Only for buffered IO */ + if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) + return false; + + /* + * just use poll if we can, and don't attempt if the fs doesn't + * support callback based unlocks + */ + if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) + return false; + + wait->wait.func = io_async_buf_func; + wait->wait.private = req; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_flags &= ~IOCB_NOWAIT; + kiocb->ki_waitq = wait; + return true; +} + +static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter) +{ + struct file *file = rw->kiocb.ki_filp; + + if (likely(file->f_op->read_iter)) + return call_read_iter(file, &rw->kiocb, iter); + else if (file->f_op->read) + return loop_rw_iter(READ, rw, iter); + else + return -EINVAL; +} + +static bool need_complete_io(struct io_kiocb *req) +{ + return req->flags & REQ_F_ISREG || + S_ISBLK(file_inode(req->file)->i_mode); +} + +static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct kiocb *kiocb = &rw->kiocb; + struct io_ring_ctx *ctx = req->ctx; + struct file *file = req->file; + int ret; + + if (unlikely(!file || !(file->f_mode & mode))) + return -EBADF; + + if (!io_req_ffs_set(req)) + req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; + - kiocb->ki_flags = iocb_flags(file); ++ kiocb->ki_flags = file->f_iocb_flags; + ret = kiocb_set_rw_flags(kiocb, rw->flags); + if (unlikely(ret)) + return ret; + + /* + * If the file is marked O_NONBLOCK, still allow retry for it if it + * supports async. Otherwise it's impossible to use O_NONBLOCK files + * reliably. If not, or it IOCB_NOWAIT is set, don't retry. 
+ */ + if ((kiocb->ki_flags & IOCB_NOWAIT) || + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) + req->flags |= REQ_F_NOWAIT; + + if (ctx->flags & IORING_SETUP_IOPOLL) { + if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) + return -EOPNOTSUPP; + + kiocb->private = NULL; + kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; + kiocb->ki_complete = io_complete_rw_iopoll; + req->iopoll_completed = 0; + } else { + if (kiocb->ki_flags & IOCB_HIPRI) + return -EINVAL; + kiocb->ki_complete = io_complete_rw; + } + + return 0; +} + +int io_read(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct io_rw_state __s, *s = &__s; + struct iovec *iovec; + struct kiocb *kiocb = &rw->kiocb; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct io_async_rw *io; + ssize_t ret, ret2; + loff_t *ppos; + + if (!req_has_async_data(req)) { + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + } else { + io = req->async_data; + s = &io->s; + + /* + * Safe and required to re-import if we're using provided + * buffers, as we dropped the selected one before retry. + */ + if (io_do_buffer_select(req)) { + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + } + + /* + * We come here from an earlier attempt, restore our state to + * match in case it doesn't. It's cheap enough that we don't + * need to make this conditional. + */ + iov_iter_restore(&s->iter, &s->iter_state); + iovec = NULL; + } + ret = io_rw_init_file(req, FMODE_READ); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } + req->cqe.res = iov_iter_count(&s->iter); + + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if (unlikely(!io_file_supports_nowait(req))) { + ret = io_setup_async_rw(req, iovec, s, true); + return ret ?: -EAGAIN; + } + kiocb->ki_flags |= IOCB_NOWAIT; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; + } + + ppos = io_kiocb_update_pos(req); + + ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } + + ret = io_iter_do_read(rw, &s->iter); + + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { + req->flags &= ~REQ_F_REISSUE; + /* if we can poll, just do that */ + if (req->opcode == IORING_OP_READ && file_can_poll(req->file)) + return -EAGAIN; + /* IOPOLL retry should happen for io-wq threads */ + if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) + goto done; + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (req->flags & REQ_F_NOWAIT) + goto done; + ret = 0; + } else if (ret == -EIOCBQUEUED) { + if (iovec) + kfree(iovec); + return IOU_ISSUE_SKIP_COMPLETE; + } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || + (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { + /* read all, failed, already did sync or don't want to retry */ + goto done; + } + + /* + * Don't depend on the iter state matching what was consumed, or being + * untouched in case of error. Restore it and we'll advance it + * manually if we need to. + */ + iov_iter_restore(&s->iter, &s->iter_state); + + ret2 = io_setup_async_rw(req, iovec, s, true); + if (ret2) + return ret2; + + iovec = NULL; + io = req->async_data; + s = &io->s; + /* + * Now use our persistent iterator and state, if we aren't already. + * We've restored and mapped the iter to match. 
+ */ + + do { + /* + * We end up here because of a partial read, either from + * above or inside this loop. Advance the iter by the bytes + * that were consumed. + */ + iov_iter_advance(&s->iter, ret); + if (!iov_iter_count(&s->iter)) + break; + io->bytes_done += ret; + iov_iter_save_state(&s->iter, &s->iter_state); + + /* if we can retry, do so with the callbacks armed */ + if (!io_rw_should_retry(req)) { + kiocb->ki_flags &= ~IOCB_WAITQ; + return -EAGAIN; + } + + /* + * Now retry read with the IOCB_WAITQ parts set in the iocb. If + * we get -EIOCBQUEUED, then we'll get a notification when the + * desired page gets unlocked. We can also get a partial read + * here, and if we do, then just retry at the new offset. + */ + ret = io_iter_do_read(rw, &s->iter); + if (ret == -EIOCBQUEUED) + return IOU_ISSUE_SKIP_COMPLETE; + /* we got some bytes, but not all. retry. */ + kiocb->ki_flags &= ~IOCB_WAITQ; + iov_iter_restore(&s->iter, &s->iter_state); + } while (ret > 0); +done: + /* it's faster to check here then delegate to kfree */ + if (iovec) + kfree(iovec); + return kiocb_done(req, ret, issue_flags); +} + +int io_write(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_rw *rw = io_kiocb_to_cmd(req); + struct io_rw_state __s, *s = &__s; + struct iovec *iovec; + struct kiocb *kiocb = &rw->kiocb; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + ssize_t ret, ret2; + loff_t *ppos; + + if (!req_has_async_data(req)) { + ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + } else { + struct io_async_rw *io = req->async_data; + + s = &io->s; + iov_iter_restore(&s->iter, &s->iter_state); + iovec = NULL; + } + ret = io_rw_init_file(req, FMODE_WRITE); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } + req->cqe.res = iov_iter_count(&s->iter); + + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if (unlikely(!io_file_supports_nowait(req))) + goto copy_iov; + + /* File path supports NOWAIT for non-direct_IO only for block devices. */ + if (!(kiocb->ki_flags & IOCB_DIRECT) && + !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) && + (req->flags & REQ_F_ISREG)) + goto copy_iov; + + kiocb->ki_flags |= IOCB_NOWAIT; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; + } + + ppos = io_kiocb_update_pos(req); + + ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } + + /* + * Open-code file_start_write here to grab freeze protection, + * which will be released by another thread in + * io_complete_rw(). Fool lockdep by telling it the lock got + * released so that it doesn't complain about the held lock when + * we return to userspace. + */ + if (req->flags & REQ_F_ISREG) { + sb_start_write(file_inode(req->file)->i_sb); + __sb_writers_release(file_inode(req->file)->i_sb, + SB_FREEZE_WRITE); + } + kiocb->ki_flags |= IOCB_WRITE; + + if (likely(req->file->f_op->write_iter)) + ret2 = call_write_iter(req->file, kiocb, &s->iter); + else if (req->file->f_op->write) + ret2 = loop_rw_iter(WRITE, rw, &s->iter); + else + ret2 = -EINVAL; + + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + ret2 = -EAGAIN; + } + + /* + * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just + * retry them without IOCB_NOWAIT. 
+ */ + if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) + ret2 = -EAGAIN; + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) + goto done; + if (!force_nonblock || ret2 != -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) + goto copy_iov; + + if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { + struct io_async_rw *rw; + + trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, + req->cqe.res, ret2); + + /* This is a partial write. The file pos has already been + * updated, setup the async struct to complete the request + * in the worker. Also update bytes_done to account for + * the bytes already written. + */ + iov_iter_save_state(&s->iter, &s->iter_state); + ret = io_setup_async_rw(req, iovec, s, true); + + rw = req->async_data; + if (rw) + rw->bytes_done += ret2; + + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); + return ret ? ret : -EAGAIN; + } +done: + ret = kiocb_done(req, ret2, issue_flags); + } else { +copy_iov: + iov_iter_restore(&s->iter, &s->iter_state); + ret = io_setup_async_rw(req, iovec, s, false); + if (!ret) { + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); + return -EAGAIN; + } + return ret; + } + /* it's reportedly faster than delegating the null check to kfree() */ + if (iovec) + kfree(iovec); + return ret; +} + +static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) +{ + io_commit_cqring_flush(ctx); + if (ctx->flags & IORING_SETUP_SQPOLL) + io_cqring_wake(ctx); +} + +int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) +{ + struct io_wq_work_node *pos, *start, *prev; + unsigned int poll_flags = BLK_POLL_NOSLEEP; + DEFINE_IO_COMP_BATCH(iob); + int nr_events = 0; + + /* + * Only spin for completions if we don't have multiple devices hanging + * off our complete list. + */ + if (ctx->poll_multi_queue || force_nonspin) + poll_flags |= BLK_POLL_ONESHOT; + + wq_list_for_each(pos, start, &ctx->iopoll_list) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); + struct io_rw *rw = io_kiocb_to_cmd(req); + int ret; + + /* + * Move completed and retryable entries to our local lists. + * If we find a request that requires polling, break out + * and complete those lists first, if we have entries there. + */ + if (READ_ONCE(req->iopoll_completed)) + break; + + ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags); + if (unlikely(ret < 0)) + return ret; + else if (ret) + poll_flags |= BLK_POLL_ONESHOT; + + /* iopoll may have completed current req */ + if (!rq_list_empty(iob.req_list) || + READ_ONCE(req->iopoll_completed)) + break; + } + + if (!rq_list_empty(iob.req_list)) + iob.complete(&iob); + else if (!pos) + return 0; + + prev = start; + wq_list_for_each_resume(pos, prev) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); + + /* order with io_complete_rw_iopoll(), e.g. ->result updates */ + if (!smp_load_acquire(&req->iopoll_completed)) + break; + nr_events++; + if (unlikely(req->flags & REQ_F_CQE_SKIP)) + continue; + + req->cqe.flags = io_put_kbuf(req, 0); + __io_fill_cqe_req(req->ctx, req); + } + + if (unlikely(!nr_events)) + return 0; + + io_commit_cqring(ctx); + io_cqring_ev_posted_iopoll(ctx); + pos = start ? start->next : ctx->iopoll_list.first; + wq_list_cut(&ctx->iopoll_list, prev, start); + io_free_batch_list(ctx, pos); + return nr_events; +}
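The blkdev_dio_unaligned() helper added in block/fops.c above relies on the logical
block size being a power of two, so masking with (size - 1) is a cheap remainder
check. A minimal standalone C sketch of that test (the helper name and the values
below are illustrative, not taken from the kernel):

    #include <assert.h>
    #include <stdbool.h>

    /* Toy model of the power-of-two alignment test: for a power-of-two
     * block size, (pos & (bsize - 1)) != 0 exactly when pos % bsize != 0. */
    static bool dio_pos_unaligned(unsigned long long pos, unsigned int bsize)
    {
            return pos & (bsize - 1);
    }

    int main(void)
    {
            assert(!dio_pos_unaligned(4096, 512)); /* 4096 = 8 * 512, aligned */
            assert(dio_pos_unaligned(4100, 512));  /* 4100 % 512 == 4, unaligned */
            return 0;
    }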
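The headline of the pull message, "keep iocb_flags() result cached in struct file"
together with "iocb: delay evaluation of IS_SYNC(...)", can be modelled outside the
kernel. The sketch below is a minimal, self-contained C analogue under invented
names (the toy_* structs and flags are not the kernel's types): flags that depend
only on how the file was opened are computed once at open time and copied per IO,
and the inode sync state is consulted only when the DSYNC decision is needed.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins; every name here is invented for illustration. */
    #define TOY_O_APPEND    (1 << 0)
    #define TOY_O_DSYNC     (1 << 1)

    #define TOY_IOCB_APPEND (1 << 0)
    #define TOY_IOCB_DSYNC  (1 << 1)

    struct toy_inode { bool is_sync; };      /* models IS_SYNC(inode) */

    struct toy_file {
            int open_flags;                  /* models file->f_flags */
            int cached_iocb_flags;           /* models file->f_iocb_flags */
            struct toy_inode *inode;
    };

    /* The per-IO recomputation the series avoids: derive iocb flags from
     * the open flags on every request. */
    static int toy_iocb_flags(const struct toy_file *f)
    {
            int res = 0;

            if (f->open_flags & TOY_O_APPEND)
                    res |= TOY_IOCB_APPEND;
            if (f->open_flags & TOY_O_DSYNC)
                    res |= TOY_IOCB_DSYNC;
            return res;
    }

    /* Cached variant: compute once at "open" (or whenever the open flags
     * change) so submission only copies the cached value. */
    static void toy_file_open(struct toy_file *f)
    {
            f->cached_iocb_flags = toy_iocb_flags(f);
    }

    /* Deferred sync check: consult the inode only when the DSYNC decision
     * is actually needed, the same idea as iocb_is_dsync(). */
    static bool toy_iocb_is_dsync(const struct toy_file *f, int ki_flags)
    {
            return (ki_flags & TOY_IOCB_DSYNC) || f->inode->is_sync;
    }

    int main(void)
    {
            struct toy_inode inode = { .is_sync = false };
            struct toy_file f = { .open_flags = TOY_O_DSYNC, .inode = &inode };

            toy_file_open(&f);
            printf("dsync? %d\n", toy_iocb_is_dsync(&f, f.cached_iocb_flags));
            return 0;
    }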
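The loop_rw_iter() fallback in io_uring/rw.c handles files that have no
->read_iter()/->write_iter() by looping over the iterator and calling ->read() or
->write() one segment at a time, stopping on a short or failed transfer. A userspace
sketch of the same pattern, using plain read(2) over an iovec array (read_iovec_loop
is a made-up name, not an existing API):

    #include <errno.h>
    #include <stddef.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Emulate readv(2) by issuing one read(2) per iovec segment, the way
     * io_uring falls back to ->read() for files lacking ->read_iter(). */
    static ssize_t read_iovec_loop(int fd, const struct iovec *iov, int iovcnt)
    {
            ssize_t total = 0;

            for (int i = 0; i < iovcnt; i++) {
                    ssize_t nr = read(fd, iov[i].iov_base, iov[i].iov_len);

                    if (nr < 0)
                            return total ? total : -errno; /* keep partial progress */
                    total += nr;
                    if ((size_t)nr != iov[i].iov_len)
                            break;                         /* short read: stop here */
            }
            return total;
    }

As in the kernel version, an error is reported only if nothing was transferred and a
short transfer ends the loop, leaving the caller to decide whether to retry the rest.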
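io_complete_rw_iopoll() publishes a completion with smp_store_release() on
->iopoll_completed and io_do_iopoll() checks it with smp_load_acquire(), so the
result written before the flag is guaranteed visible to the reaping side. A minimal
C11 sketch of that release/acquire pairing, with toy types rather than the kernel's:

    #include <stdatomic.h>

    struct toy_req {
            long res;               /* result written by the completion side */
            atomic_int completed;   /* models req->iopoll_completed */
    };

    /* Completion side: publish the result first, then the flag (release). */
    static void toy_complete(struct toy_req *req, long res)
    {
            req->res = res;
            atomic_store_explicit(&req->completed, 1, memory_order_release);
    }

    /* Polling side: only trust req->res after observing the flag (acquire). */
    static int toy_reap(struct toy_req *req, long *res)
    {
            if (!atomic_load_explicit(&req->completed, memory_order_acquire))
                    return 0;       /* not completed yet; stop walking the list */
            *res = req->res;
            return 1;
    }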