struct async_poll {
struct io_poll_iocb poll;
struct io_poll_iocb *double_poll;
- struct io_wq_work work;
};
/*
u16 buf_index;
u32 result;
- struct io_ring_ctx *ctx;
- unsigned int flags;
- refcount_t refs;
- struct task_struct *task;
- u64 user_data;
+ struct io_ring_ctx *ctx;
+ unsigned int flags;
+ refcount_t refs;
+ struct task_struct *task;
+ u64 user_data;
- struct list_head link_list;
+ struct list_head link_list;
/*
* 1. used with ctx->iopoll_list with reads/writes
* 2. to track reqs with ->files (see io_op_def::file_table)
*/
- struct list_head inflight_entry;
-
- struct percpu_ref *fixed_file_refs;
-
- union {
- /*
- * Only commands that never go async can use the below fields,
- * obviously. Right now only IORING_OP_POLL_ADD uses them, and
- * async armed poll handlers for regular commands. The latter
- * restore the work, if needed.
- */
- struct {
- struct hlist_node hash_node;
- struct async_poll *apoll;
- };
- struct io_wq_work work;
- };
- struct callback_head task_work;
+ struct list_head inflight_entry;
+
+ struct percpu_ref *fixed_file_refs;
+ struct callback_head task_work;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ struct hlist_node hash_node;
+ struct async_poll *apoll;
+ struct io_wq_work work;
};
struct io_defer_entry {
ACCT_PINNED,
};
-static bool io_rw_reissue(struct io_kiocb *req, long res);
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+ struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_files_update *ip,
unsigned nr_args);
-static int io_grab_files(struct io_kiocb *req);
-static void io_complete_rw_common(struct kiocb *kiocb, long res,
- struct io_comp_state *cs);
+static int io_prep_work_files(struct io_kiocb *req);
static void __io_clean_op(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
int fd, struct file **out_file, bool fixed);
static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
if (!current->mm) {
- if (unlikely(!ctx->sqo_mm || !mmget_not_zero(ctx->sqo_mm)))
+ if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
+ !mmget_not_zero(ctx->sqo_mm)))
return -EFAULT;
kthread_use_mm(ctx->sqo_mm);
}
}
}
-static void io_req_clean_work(struct io_kiocb *req)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_WORK_INITIALIZED))
- return;
+ return false;
+
+ req->flags &= ~REQ_F_WORK_INITIALIZED;
if (req->work.mm) {
mmdrop(req->work.mm);
if (req->work.fs) {
struct fs_struct *fs = req->work.fs;
+ if (req->flags & REQ_F_COMP_LOCKED)
+ return true;
+
spin_lock(&req->work.fs->lock);
if (--fs->users)
fs = NULL;
spin_unlock(&req->work.fs->lock);
if (fs)
free_fs_struct(fs);
+ req->work.fs = NULL;
}
+
+ return false;
}
static void io_prep_async_work(struct io_kiocb *req)
io_prep_async_work(cur);
}
-static void __io_queue_async_work(struct io_kiocb *req)
+static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link = io_prep_linked_timeout(req);
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
io_wq_enqueue(ctx->io_wq, &req->work);
-
- if (link)
- io_queue_linked_timeout(link);
+ return link;
}
static void io_queue_async_work(struct io_kiocb *req)
{
+ struct io_kiocb *link;
+
/* init ->work of the whole link before punting */
io_prep_async_link(req);
- __io_queue_async_work(req);
+ link = __io_queue_async_work(req);
+
+ if (link)
+ io_queue_linked_timeout(link);
}
static void io_kill_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
if (ret != -1) {
- atomic_inc(&req->ctx->cq_timeouts);
+ atomic_set(&req->ctx->cq_timeouts,
+ atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, 0);
do {
struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
struct io_defer_entry, list);
+ struct io_kiocb *link;
if (req_need_defer(de->req, de->seq))
break;
list_del_init(&de->list);
/* punt-init is done before queueing for defer */
- __io_queue_async_work(de->req);
+ link = __io_queue_async_work(de->req);
+ if (link) {
+ __io_queue_linked_timeout(link);
+ /* drop submission reference */
+ link->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(link);
+ }
kfree(de);
} while (!list_empty(&ctx->defer_list));
}
eventfd_signal(ctx->cq_ev_fd, 1);
}
+static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
+{
+ if (list_empty(&ctx->cq_overflow_list)) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+ }
+}
+
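The overflow state this helper tracks is also exported to userspace via the IORING_SQ_CQ_OVERFLOW bit in the SQ ring flags. As a hedged, illustrative sketch (not part of the patch; raw syscall usage assumed, and sq_flags is taken to point at the mmap'ed SQ ring flags word), the userspace side reacts roughly like this:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* If the kernel flagged a CQ overflow, enter the kernel with
 * IORING_ENTER_GETEVENTS so the backlogged completions get flushed
 * into the CQ ring. */
static void flush_cq_overflow(int ring_fd, const unsigned *sq_flags)
{
	if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_CQ_OVERFLOW)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_GETEVENTS, NULL, 0);
}
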
/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
}
io_commit_cqring(ctx);
- if (cqe) {
- clear_bit(0, &ctx->sq_check_overflow);
- clear_bit(0, &ctx->cq_check_overflow);
- ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
- }
+ io_cqring_mark_overflow(ctx);
+
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
fput(file);
}
-static void io_dismantle_req(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
{
io_clean_op(req);
kfree(req->io);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- __io_put_req_task(req);
- io_req_clean_work(req);
if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
+
+ return io_req_clean_work(req);
}
-static void __io_free_req(struct io_kiocb *req)
+static void __io_free_req_finish(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx;
+ struct io_ring_ctx *ctx = req->ctx;
- io_dismantle_req(req);
- ctx = req->ctx;
+ __io_put_req_task(req);
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req);
else
percpu_ref_put(&ctx->refs);
}
+static void io_req_task_file_table_put(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct fs_struct *fs = req->work.fs;
+
+ spin_lock(&req->work.fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->work.fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ req->work.fs = NULL;
+ __io_free_req_finish(req);
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+ if (!io_dismantle_req(req)) {
+ __io_free_req_finish(req);
+ } else {
+ int ret;
+
+ init_task_work(&req->task_work, io_req_task_file_table_put);
+ ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+ if (unlikely(ret)) {
+ struct task_struct *tsk;
+
+ tsk = io_wq_get_task(req->ctx->io_wq);
+ task_work_add(tsk, &req->task_work, 0);
+ }
+ }
+}
+
static bool io_link_cancel_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
return false;
list_del_init(&link->link_list);
+ link->flags |= REQ_F_COMP_LOCKED;
wake_ev = io_link_cancel_timeout(link);
req->flags &= ~REQ_F_LINK_TIMEOUT;
return wake_ev;
trace_io_uring_fail_link(req, link);
io_cqring_fill_event(link, -ECANCELED);
+ link->flags |= REQ_F_COMP_LOCKED;
__io_double_put_req(link);
req->flags &= ~REQ_F_LINK_TIMEOUT;
}
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
- int ret, notify = TWA_RESUME;
+ int ret, notify;
/*
- * SQPOLL kernel thread doesn't need notification, just a wakeup.
- * If we're not using an eventfd, then TWA_RESUME is always fine,
- * as we won't have dependencies between request completions for
- * other kernel wait conditions.
+ * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+ * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+ * processing task_work. There's no reliable way to tell if TWA_RESUME
+ * will do the job.
*/
- if (ctx->flags & IORING_SETUP_SQPOLL)
- notify = 0;
- else if (ctx->cq_ev_fd)
+ notify = 0;
+ if (!(ctx->flags & IORING_SETUP_SQPOLL))
notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify);
if (!ret)
wake_up_process(tsk);
+
return ret;
}
static void io_req_task_submit(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
__io_req_task_submit(req);
+ percpu_ref_put(&ctx->refs);
}
static void io_req_task_queue(struct io_kiocb *req)
int ret;
init_task_work(&req->task_work, io_req_task_submit);
+ percpu_ref_get(&req->ctx->refs);
ret = io_req_task_work_add(req, &req->task_work);
if (unlikely(ret)) {
struct req_batch {
void *reqs[IO_IOPOLL_BATCH];
int to_free;
+
+ struct task_struct *task;
+ int task_refs;
};
+static inline void io_init_req_batch(struct req_batch *rb)
+{
+ rb->to_free = 0;
+ rb->task_refs = 0;
+ rb->task = NULL;
+}
+
static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
struct req_batch *rb)
{
{
if (rb->to_free)
__io_req_free_batch_flush(ctx, rb);
+ if (rb->task) {
+ put_task_struct_many(rb->task, rb->task_refs);
+ rb->task = NULL;
+ }
}
static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
if (req->flags & REQ_F_LINK_HEAD)
io_queue_next(req);
- io_dismantle_req(req);
+ if (req->flags & REQ_F_TASK_PINNED) {
+ if (req->task != rb->task) {
+ if (rb->task)
+ put_task_struct_many(rb->task, rb->task_refs);
+ rb->task = req->task;
+ rb->task_refs = 0;
+ }
+ rb->task_refs++;
+ req->flags &= ~REQ_F_TASK_PINNED;
+ }
+
+ WARN_ON_ONCE(io_dismantle_req(req));
rb->reqs[rb->to_free++] = req;
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
__io_req_free_batch_flush(req->ctx, rb);
do {
req = list_first_entry(again, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (!io_rw_reissue(req, -EAGAIN))
- io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
+ __io_complete_rw(req, -EAGAIN, 0, NULL);
} while (!list_empty(again));
}
/* order with ->result store in io_complete_rw_iopoll() */
smp_rmb();
- rb.to_free = 0;
+ io_init_req_batch(&rb);
while (!list_empty(done)) {
int cflags = 0;
refcount_inc(&req->refs);
io_queue_async_work(req);
}
+
+ percpu_ref_put(&ctx->refs);
}
#endif
return false;
init_task_work(&req->task_work, io_rw_resubmit);
+ percpu_ref_get(&req->ctx->refs);
+
ret = io_req_task_work_add(req, &req->task_work);
if (!ret)
return true;
return io_rw_prep_async(req, READ, force_nonblock);
}
+/*
+ * This is our waitqueue callback handler, registered through lock_page_async()
+ * when we initially tried to do the IO with the iocb and armed our waitqueue.
+ * This gets called when the page is unlocked, and we generally expect that to
+ * happen when the page IO is completed and the page is now uptodate. This will
+ * queue a task_work based retry of the operation, attempting to copy the data
+ * again. If the latter fails because the page was NOT uptodate, then we will
+ * do a thread based blocking retry of the operation. That's the unexpected
+ * slow path.
+ */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
int sync, void *arg)
{
wpq = container_of(wait, struct wait_page_queue, wait);
- ret = wake_page_match(wpq, key);
- if (ret != 1)
- return ret;
+ if (!wake_page_match(wpq, key))
+ return 0;
list_del_init(&wait->entry);
init_task_work(&req->task_work, io_req_task_submit);
+ percpu_ref_get(&req->ctx->refs);
+
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
ret = io_req_task_work_add(req, &req->task_work);
return 1;
}
+static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
+ struct wait_page_queue *wait,
+ wait_queue_func_t func,
+ void *data)
+{
+ /* Can't support async wakeup with polled IO */
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EINVAL;
+ if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
+ wait->wait.func = func;
+ wait->wait.private = data;
+ wait->wait.flags = 0;
+ INIT_LIST_HEAD(&wait->wait.entry);
+ kiocb->ki_flags |= IOCB_WAITQ;
+ kiocb->ki_waitq = wait;
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/*
+ * This controls whether a given IO request should be armed for async page
+ * based retry. If we return false here, the request is handed to the async
+ * worker threads for retry. If we're doing buffered reads on a regular file,
+ * we prepare a private wait_page_queue entry and retry the operation. This
+ * will either succeed because the page is now uptodate and unlocked, or it
+ * will register a callback when the page is unlocked at IO completion. Through
+ * that callback, io_uring uses task_work to set up a retry of the operation.
+ * That retry will attempt the buffered read again. The retry will generally
+ * succeed, or in rare cases where it fails, we then fall back to using the
+ * async worker threads for a blocking retry.
+ */
static bool io_rw_should_retry(struct io_kiocb *req)
{
struct kiocb *kiocb = &req->rw.kiocb;
{
if (req->file->f_op->read_iter)
return call_read_iter(req->file, &req->rw.kiocb, iter);
- return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+ else if (req->file->f_op->read)
+ return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+ else
+ return -EINVAL;
}
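
For context, none of the retry plumbing above changes how a buffered read is issued from userspace; the request simply completes inline, through the task_work based retry, or through io-wq. A minimal, hedged liburing sketch (liburing availability and the file path are assumptions, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

/* One buffered read through io_uring; which completion path the kernel
 * takes (inline, async buffered retry, or io-wq punt) is decided by the
 * code above. */
int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	fd = open("/etc/hostname", O_RDONLY);	/* illustrative path only */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}
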
static int io_read(struct io_kiocb *req, bool force_nonblock,
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
- ssize_t io_size, ret;
+ ssize_t io_size, ret, ret2;
+ unsigned long nr_segs;
ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
+ io_size = ret;
+ req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
- io_size = ret;
- req->result = io_size;
-
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, READ))
goto copy_iov;
iov_count = iov_iter_count(&iter);
+ nr_segs = iter.nr_segs;
ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
- if (!ret) {
- unsigned long nr_segs = iter.nr_segs;
- ssize_t ret2 = 0;
+ if (unlikely(ret))
+ goto out_free;
- ret2 = io_iter_do_read(req, &iter);
+ ret2 = io_iter_do_read(req, &iter);
- /* Catch -EAGAIN return for forced non-blocking submission */
- if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
- kiocb_done(kiocb, ret2, cs);
- } else {
- iter.count = iov_count;
- iter.nr_segs = nr_segs;
+ /* Catch -EAGAIN return for forced non-blocking submission */
+ if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
+ kiocb_done(kiocb, ret2, cs);
+ } else {
+ iter.count = iov_count;
+ iter.nr_segs = nr_segs;
copy_iov:
- ret = io_setup_async_rw(req, io_size, iovec,
- inline_vecs, &iter);
- if (ret)
+ ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+ &iter);
+ if (ret)
+ goto out_free;
+ /* it's copied and will be cleaned with ->io */
+ iovec = NULL;
+ /* if we can retry, do so with the callbacks armed */
+ if (io_rw_should_retry(req)) {
+ ret2 = io_iter_do_read(req, &iter);
+ if (ret2 == -EIOCBQUEUED) {
+ goto out_free;
+ } else if (ret2 != -EAGAIN) {
+ kiocb_done(kiocb, ret2, cs);
goto out_free;
- /* it's copied and will be cleaned with ->io */
- iovec = NULL;
- /* if we can retry, do so with the callbacks armed */
- if (io_rw_should_retry(req)) {
- ret2 = io_iter_do_read(req, &iter);
- if (ret2 == -EIOCBQUEUED) {
- goto out_free;
- } else if (ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, cs);
- goto out_free;
- }
}
- kiocb->ki_flags &= ~IOCB_WAITQ;
- return -EAGAIN;
}
+ kiocb->ki_flags &= ~IOCB_WAITQ;
+ return -EAGAIN;
}
out_free:
if (iovec)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
- ssize_t ret, io_size;
+ ssize_t ret, ret2, io_size;
+ unsigned long nr_segs;
ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
+ io_size = ret;
+ req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
- io_size = ret;
- req->result = io_size;
-
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
goto copy_iov;
goto copy_iov;
iov_count = iov_iter_count(&iter);
+ nr_segs = iter.nr_segs;
ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
- if (!ret) {
- unsigned long nr_segs = iter.nr_segs;
- ssize_t ret2;
+ if (unlikely(ret))
+ goto out_free;
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * io_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (req->flags & REQ_F_ISREG) {
- __sb_start_write(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE, true);
- __sb_writers_release(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE);
- }
- kiocb->ki_flags |= IOCB_WRITE;
+ /*
+ * Open-code file_start_write here to grab freeze protection,
+ * which will be released by another thread in
+ * io_complete_rw(). Fool lockdep by telling it the lock got
+ * released so that it doesn't complain about the held lock when
+ * we return to userspace.
+ */
+ if (req->flags & REQ_F_ISREG) {
+ __sb_start_write(file_inode(req->file)->i_sb,
+ SB_FREEZE_WRITE, true);
+ __sb_writers_release(file_inode(req->file)->i_sb,
+ SB_FREEZE_WRITE);
+ }
+ kiocb->ki_flags |= IOCB_WRITE;
- if (req->file->f_op->write_iter)
- ret2 = call_write_iter(req->file, kiocb, &iter);
- else
- ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ if (req->file->f_op->write_iter)
+ ret2 = call_write_iter(req->file, kiocb, &iter);
+ else if (req->file->f_op->write)
+ ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ else
+ ret2 = -EINVAL;
- /*
- * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
- * retry them without IOCB_NOWAIT.
- */
- if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
- ret2 = -EAGAIN;
- if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, cs);
- } else {
- iter.count = iov_count;
- iter.nr_segs = nr_segs;
+ /*
+ * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+ * retry them without IOCB_NOWAIT.
+ */
+ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+ ret2 = -EAGAIN;
+ if (!force_nonblock || ret2 != -EAGAIN) {
+ kiocb_done(kiocb, ret2, cs);
+ } else {
+ iter.count = iov_count;
+ iter.nr_segs = nr_segs;
copy_iov:
- ret = io_setup_async_rw(req, io_size, iovec,
- inline_vecs, &iter);
- if (ret)
- goto out_free;
- /* it's copied and will be cleaned with ->io */
- iovec = NULL;
- return -EAGAIN;
- }
+ ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+ &iter);
+ if (ret)
+ goto out_free;
+ /* it's copied and will be cleaned with ->io */
+ iovec = NULL;
+ return -EAGAIN;
}
out_free:
if (iovec)
req->result = mask;
init_task_work(&req->task_work, func);
+ percpu_ref_get(&req->ctx->refs);
+
/*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
static void io_poll_task_func(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt = NULL;
io_poll_task_handler(req, &nxt);
if (nxt)
__io_req_task_submit(nxt);
+ percpu_ref_put(&ctx->refs);
}
static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
if (io_poll_rewait(req, &apoll->poll)) {
spin_unlock_irq(&ctx->completion_lock);
+ percpu_ref_put(&ctx->refs);
return;
}
io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
- /* restore ->work in case we need to retry again */
- if (req->flags & REQ_F_WORK_INITIALIZED)
- memcpy(&req->work, &apoll->work, sizeof(req->work));
-
if (!READ_ONCE(apoll->poll.canceled))
__io_req_task_submit(req);
else
__io_req_task_cancel(req, -ECANCELED);
+ percpu_ref_put(&ctx->refs);
kfree(apoll->double_poll);
kfree(apoll);
}
apoll->double_poll = NULL;
req->flags |= REQ_F_POLLED;
- if (req->flags & REQ_F_WORK_INITIALIZED)
- memcpy(&apoll->work, &req->work, sizeof(req->work));
-
io_get_req_task(req);
req->apoll = apoll;
INIT_HLIST_NODE(&req->hash_node);
if (ret) {
io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
- if (req->flags & REQ_F_WORK_INITIALIZED)
- memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll->double_poll);
kfree(apoll);
return false;
do_complete = __io_poll_remove_one(req, &apoll->poll);
if (do_complete) {
io_put_req(req);
- /*
- * restore ->work because we will call
- * io_req_clean_work below when dropping the
- * final reference.
- */
- if (req->flags & REQ_F_WORK_INITIALIZED)
- memcpy(&req->work, &apoll->work,
- sizeof(req->work));
kfree(apoll->double_poll);
kfree(apoll);
}
struct io_poll_table ipt;
__poll_t mask;
- /* ->work is in union with hash_node and others */
- io_req_clean_work(req);
- req->flags &= ~REQ_F_WORK_INITIALIZED;
-
INIT_HLIST_NODE(&req->hash_node);
ipt.pt._qproc = io_poll_queue_proc;
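
As a reminder of what this path serves, IORING_OP_POLL_ADD is driven from userspace along the lines of the hedged liburing sketch below (the pipe fd and POLLIN mask are purely illustrative):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

/* Single-shot poll on the read end of a pipe; the completion carries the
 * triggered poll mask in cqe->res. */
int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	if (io_uring_queue_init(4, &ring, 0) || pipe(fds))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	io_uring_submit(&ring);

	write(fds[1], "x", 1);		/* make the pipe readable */
	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("poll result: 0x%x\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
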
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- atomic_inc(&ctx->cq_timeouts);
-
spin_lock_irqsave(&ctx->completion_lock, flags);
+ atomic_set(&req->ctx->cq_timeouts,
+ atomic_read(&req->ctx->cq_timeouts) + 1);
+
/*
* We could be racing with timeout deletion. If the list is empty,
* then timeout lookup already found it and will be handling it.
return -EALREADY;
req_set_fail_links(req);
+ req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, -ECANCELED);
io_put_req(req);
return 0;
if (io_alloc_async_ctx(req))
return -EAGAIN;
-
- if (io_op_defs[req->opcode].file_table) {
- io_req_init_async(req);
- ret = io_grab_files(req);
- if (unlikely(ret))
- return ret;
- }
+ ret = io_prep_work_files(req);
+ if (unlikely(ret))
+ return ret;
switch (req->opcode) {
case IORING_OP_NOP:
if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
kfree(de);
- return 0;
+ io_queue_async_work(req);
+ return -EIOCBQUEUED;
}
trace_io_uring_defer(ctx, req, req->user_data);
int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx;
+ io_req_init_async(req);
+
if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
return 0;
if (!ctx->ring_file)
return ret;
}
+static inline int io_prep_work_files(struct io_kiocb *req)
+{
+ if (!io_op_defs[req->opcode].file_table)
+ return 0;
+ return io_grab_files(req);
+}
+
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
struct io_timeout_data *data = container_of(timer,
return HRTIMER_NORESTART;
}
-static void io_queue_linked_timeout(struct io_kiocb *req)
+static void __io_queue_linked_timeout(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
-
/*
* If the list is now empty, then our linked request finished before
* we got a chance to setup the timer
*/
- spin_lock_irq(&ctx->completion_lock);
if (!list_empty(&req->link_list)) {
struct io_timeout_data *data = &req->io->timeout;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
}
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ __io_queue_linked_timeout(req);
spin_unlock_irq(&ctx->completion_lock);
/* drop submission reference */
* doesn't support non-blocking read/write attempts
*/
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- if (io_arm_poll_handler(req)) {
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
- goto exit;
- }
+ if (!io_arm_poll_handler(req)) {
punt:
- io_req_init_async(req);
-
- if (io_op_defs[req->opcode].file_table) {
- ret = io_grab_files(req);
- if (ret)
+ ret = io_prep_work_files(req);
+ if (unlikely(ret))
goto err;
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
}
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req);
+ if (linked_timeout)
+ io_queue_linked_timeout(linked_timeout);
goto exit;
}
return submitted;
}
+static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
+{
+ /* Tell userspace we may need a wakeup call */
+ spin_lock_irq(&ctx->completion_lock);
+ ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
+static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
+{
+ spin_lock_irq(&ctx->completion_lock);
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
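+
These helpers toggle the IORING_SQ_NEED_WAKEUP bit that an SQPOLL application checks before deciding whether the polling thread needs a kick. A hedged raw-syscall sketch of that userspace side (sq_flags is assumed to point at the mmap'ed SQ ring flags word):

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* After filling SQEs in SQPOLL mode, only enter the kernel if the SQ
 * thread went idle and asked to be woken up. */
static void sqpoll_kick_if_needed(int ring_fd, const unsigned *sq_flags)
{
	if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
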
static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
continue;
}
- /* Tell userspace we may need a wakeup call */
- spin_lock_irq(&ctx->completion_lock);
- ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
- spin_unlock_irq(&ctx->completion_lock);
+ io_ring_set_wakeup_flag(ctx);
to_submit = io_sqring_entries(ctx);
if (!to_submit || ret == -EBUSY) {
}
if (io_run_task_work()) {
finish_wait(&ctx->sqo_wait, &wait);
+ io_ring_clear_wakeup_flag(ctx);
continue;
}
if (signal_pending(current))
schedule();
finish_wait(&ctx->sqo_wait, &wait);
- spin_lock_irq(&ctx->completion_lock);
- ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
- spin_unlock_irq(&ctx->completion_lock);
+ io_ring_clear_wakeup_flag(ctx);
ret = 0;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
- spin_lock_irq(&ctx->completion_lock);
- ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
- spin_unlock_irq(&ctx->completion_lock);
+ io_ring_clear_wakeup_flag(ctx);
}
mutex_lock(&ctx->uring_lock);
{
int ret;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- mmgrab(current->mm);
- ctx->sqo_mm = current->mm;
+ mmgrab(current->mm);
+ ctx->sqo_mm = current->mm;
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto err;
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_finish_async(ctx);
+ io_sqe_buffer_unregister(ctx);
if (ctx->sqo_mm) {
mmdrop(ctx->sqo_mm);
ctx->sqo_mm = NULL;
}
- io_sqe_buffer_unregister(ctx);
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
spin_lock_irq(&ctx->completion_lock);
list_del(&cancel_req->compl.list);
cancel_req->flags &= ~REQ_F_OVERFLOW;
- if (list_empty(&ctx->cq_overflow_list)) {
- clear_bit(0, &ctx->sq_check_overflow);
- clear_bit(0, &ctx->cq_check_overflow);
- ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
- }
- spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_mark_overflow(ctx);
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
/*
* Put inflight ref and overflow ref. If that's
struct io_rings *rings;
size_t size, sq_array_offset;
+ /* make sure these are sane, as we already accounted them */
+ ctx->sq_entries = p->sq_entries;
+ ctx->cq_entries = p->cq_entries;
+
size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
if (size == SIZE_MAX)
return -EOVERFLOW;
rings->cq_ring_entries = p->cq_entries;
ctx->sq_mask = rings->sq_ring_mask;
ctx->cq_mask = rings->cq_ring_mask;
- ctx->sq_entries = rings->sq_ring_entries;
- ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
if (size == SIZE_MAX) {
ctx->user = user;
ctx->creds = get_current_cred();
+ /*
+ * Account memory _before_ installing the file descriptor. Once
+ * the descriptor is installed, it can get closed at any time. Also
+ * do this before hitting the general error path, as ring freeing
+ * will un-account as well.
+ */
+ io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
+ ACCT_LOCKED);
+ ctx->limit_mem = limit_mem;
+
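Since the ring memory is charged against RLIMIT_MEMLOCK at this point (on kernels of this era), an undersized limit surfaces as an early setup failure rather than a problem after the fd has been handed out. A hedged liburing sketch of that behavior (the limit value is arbitrary, and a privileged process may not hit it):

#include <stdio.h>
#include <sys/resource.h>
#include <liburing.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct io_uring ring;
	int ret;

	setrlimit(RLIMIT_MEMLOCK, &rl);		/* deliberately tiny */
	ret = io_uring_queue_init(256, &ring, 0);
	if (ret < 0)
		fprintf(stderr, "io_uring_queue_init: %d\n", ret);
	else
		io_uring_queue_exit(&ring);
	return 0;
}
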
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
ret = -EFAULT;
goto err;
}
+
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
goto err;
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
- io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
- ACCT_LOCKED);
- ctx->limit_mem = limit_mem;
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);