};
};
-struct fixed_rsrc_table {
- struct io_fixed_file *files;
+struct io_file_table {
+ /* two level table */
+ struct io_fixed_file **files;
};
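The files table becomes two-level: a top-level array of pointers to fixed-size chunks of struct io_fixed_file. A minimal sketch of how an index resolves, not part of the patch and assuming the IORING_FILE_TABLE_SHIFT/MASK constants fs/io_uring.c already uses (a shift of 9, i.e. 512 slots per second-level chunk); it mirrors the reworked io_fixed_file_slot() further down in this diff:

/*
 * Illustrative only: resolve fixed-file index i through the two levels.
 * The helper name is made up; the constants are the existing ones from
 * fs/io_uring.c.
 */
static struct io_fixed_file *example_file_slot(struct io_file_table *table,
					       unsigned i)
{
	/* top level selects the chunk, low bits select the slot inside it */
	struct io_fixed_file *chunk = table->files[i >> IORING_FILE_TABLE_SHIFT];

	return &chunk[i & IORING_FILE_TABLE_MASK];
}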
struct io_rsrc_node {
typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
struct io_rsrc_data {
- struct fixed_rsrc_table *table;
struct io_ring_ctx *ctx;
rsrc_put_fn *do_put;
- struct percpu_ref refs;
+ atomic_t refs;
struct completion done;
bool quiesce;
};
* used. Only updated through io_uring_register(2).
*/
struct io_rsrc_data *file_data;
+ struct io_file_table file_table;
unsigned nr_user_files;
/* if used, fixed mapped user buffers */
__poll_t events;
bool done;
bool canceled;
- bool update_events;
- bool update_user_data;
- union {
- struct wait_queue_entry wait;
- struct {
- u64 old_user_data;
- u64 new_user_data;
- };
- };
+ struct wait_queue_entry wait;
};
-struct io_poll_remove {
+struct io_poll_update {
struct file *file;
- u64 addr;
+ u64 old_user_data;
+ u64 new_user_data;
+ __poll_t events;
+ bool update_events;
+ bool update_user_data;
};
struct io_close {
struct io_sr_msg {
struct file *file;
union {
- struct user_msghdr __user *umsg;
- void __user *buf;
+ struct compat_msghdr __user *umsg_compat;
+ struct user_msghdr __user *umsg;
+ void __user *buf;
};
int msg_flags;
int bgid;
struct file *file;
struct io_rw rw;
struct io_poll_iocb poll;
- struct io_poll_remove poll_remove;
+ struct io_poll_update poll_update;
struct io_accept accept;
struct io_sync sync;
struct io_cancel cancel;
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
-static void io_cqring_fill_event(struct io_kiocb *req, long res);
+static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
}
}
+static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
+{
+ bool got = percpu_ref_tryget(ref);
+
+ /* already at zero, wait for ->release() */
+ if (!got)
+ wait_for_completion(compl);
+ percpu_ref_resurrect(ref);
+ if (got)
+ percpu_ref_put(ref);
+}
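The helper copes with the race against ->release(): if the tryget succeeds, the ref has not yet dropped to zero and can be resurrected straight away; if it fails, the completion signalled from ->release() must be waited for first. An equivalent open-coded form, shown only to spell the two cases out (not part of the patch; the function name is illustrative):

static void example_refs_resurrect(struct percpu_ref *ref,
				   struct completion *compl)
{
	if (percpu_ref_tryget(ref)) {
		/* not released yet: resurrect, then drop the temporary ref */
		percpu_ref_resurrect(ref);
		percpu_ref_put(ref);
	} else {
		/* already hit zero: wait for ->release() before resurrecting */
		wait_for_completion(compl);
		percpu_ref_resurrect(ref);
	}
}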
+
static bool io_match_task(struct io_kiocb *head,
struct task_struct *task,
struct files_struct *files)
static inline void req_set_fail_links(struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
+ if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
}
}
static void io_kill_timeout(struct io_kiocb *req, int status)
+ __must_hold(&req->ctx->completion_lock)
{
struct io_timeout_data *io = req->async_data;
- int ret;
- ret = hrtimer_try_to_cancel(&io->timer);
- if (ret != -1) {
+ if (hrtimer_try_to_cancel(&io->timer) != -1) {
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
- io_cqring_fill_event(req, status);
+ io_cqring_fill_event(req, status, 0);
io_put_req_deferred(req, 1);
}
}
return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}
-static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
+static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
unsigned tail;
static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
- if (!ctx->cq_ev_fd)
+ if (likely(!ctx->cq_ev_fd))
return false;
if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
return false;
- if (!ctx->eventfd_async)
- return true;
- return io_wq_current_is_worker();
+ return !ctx->eventfd_async || io_wq_current_is_worker();
}
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
atomic_inc(&req->refs);
}
-static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
- unsigned int cflags)
+static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
+ unsigned int cflags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_overflow_cqe *ocqe;
+
+ ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
+ if (!ocqe) {
+ /*
+ * If we're in ring overflow flush mode, or in task cancel mode,
+ * or cannot allocate an overflow entry, then we need to drop it
+ * on the floor.
+ */
+ WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
+ return false;
+ }
+ if (list_empty(&ctx->cq_overflow_list)) {
+ set_bit(0, &ctx->sq_check_overflow);
+ set_bit(0, &ctx->cq_check_overflow);
+ ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+ }
+ ocqe->cqe.user_data = req->user_data;
+ ocqe->cqe.res = res;
+ ocqe->cqe.flags = cflags;
+ list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
+ return true;
+}
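The overflow path allocates a real entry per dropped CQE and queues it on cq_overflow_list. For reference, the entry is simply a CQE plus a list hook; a sketch of its layout, assuming the io_overflow_cqe definition introduced elsewhere in fs/io_uring.c as part of this series:

/* sketch only; the real definition lives elsewhere in fs/io_uring.c */
struct io_overflow_cqe {
	struct io_uring_cqe	cqe;
	struct list_head	list;
};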
+
+static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_cqe *cqe;
WRITE_ONCE(cqe->flags, cflags);
return true;
}
- if (!atomic_read(&req->task->io_uring->in_idle)) {
- struct io_overflow_cqe *ocqe;
-
- ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
- if (!ocqe)
- goto overflow;
- if (list_empty(&ctx->cq_overflow_list)) {
- set_bit(0, &ctx->sq_check_overflow);
- set_bit(0, &ctx->cq_check_overflow);
- ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
- }
- ocqe->cqe.user_data = req->user_data;
- ocqe->cqe.res = res;
- ocqe->cqe.flags = cflags;
- list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
- return true;
- }
-overflow:
- /*
- * If we're in ring overflow flush mode, or in task cancel mode,
- * or cannot allocate an overflow entry, then we need to drop it
- * on the floor.
- */
- WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
- return false;
+ return io_cqring_event_overflow(req, res, cflags);
}
-static void io_cqring_fill_event(struct io_kiocb *req, long res)
+/* not as hot to bloat with inlining */
+static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
- __io_cqring_fill_event(req, res, 0);
+ return __io_cqring_fill_event(req, res, cflags);
}
static void io_req_complete_post(struct io_kiocb *req, long res,
*/
if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
struct io_timeout_data *io = link->async_data;
- int ret;
io_remove_next_linked(req);
link->timeout.head = NULL;
- ret = hrtimer_try_to_cancel(&io->timer);
- if (ret != -1) {
- io_cqring_fill_event(link, -ECANCELED);
+ if (hrtimer_try_to_cancel(&io->timer) != -1) {
+ io_cqring_fill_event(link, -ECANCELED, 0);
io_put_req_deferred(link, 1);
return true;
}
link->link = NULL;
trace_io_uring_fail_link(req, link);
- io_cqring_fill_event(link, -ECANCELED);
+ io_cqring_fill_event(link, -ECANCELED, 0);
io_put_req_deferred(link, 2);
link = nxt;
}
if (likely(req->flags & REQ_F_LINK_TIMEOUT))
posted = io_kill_linked_timeout(req);
- if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
+ if (unlikely((req->flags & REQ_F_FAIL_LINK) &&
+ !(req->flags & REQ_F_HARDLINK))) {
posted |= (req->link != NULL);
io_fail_links(req);
}
return ret;
}
-/*
- * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
- * non-spinning poll check - we'll still enter the driver poll loop, but only
- * as a non-spinning completion check.
- */
-static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
- long min)
-{
- while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
- int ret;
-
- ret = io_do_iopoll(ctx, nr_events, min);
- if (ret < 0)
- return ret;
- if (*nr_events >= min)
- return 0;
- }
-
- return 1;
-}
-
/*
* We can't just wait for polled events to come to us, we have to actively
* find and complete them.
static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
unsigned int nr_events = 0;
- int iters = 0, ret = 0;
+ int ret = 0;
/*
* We disallow the app entering submit/complete with polling, but we
* that got punted to a workqueue.
*/
mutex_lock(&ctx->uring_lock);
+ /*
+ * Don't enter poll loop if we already have events pending.
+ * If we do, we can potentially be spinning for commands that
+ * already triggered a CQE (eg in error).
+ */
+ if (test_bit(0, &ctx->cq_check_overflow))
+ __io_cqring_overflow_flush(ctx, false);
+ if (io_cqring_events(ctx))
+ goto out;
do {
- /*
- * Don't enter poll loop if we already have events pending.
- * If we do, we can potentially be spinning for commands that
- * already triggered a CQE (eg in error).
- */
- if (test_bit(0, &ctx->cq_check_overflow))
- __io_cqring_overflow_flush(ctx, false);
- if (io_cqring_events(ctx))
- break;
-
/*
* If a submit got punted to a workqueue, we can have the
* application entering polling for a command before it gets
* forever, while the workqueue is stuck trying to acquire the
* very same mutex.
*/
- if (!(++iters & 7)) {
+ if (list_empty(&ctx->iopoll_list)) {
mutex_unlock(&ctx->uring_lock);
io_run_task_work();
mutex_lock(&ctx->uring_lock);
- }
-
- ret = io_iopoll_getevents(ctx, &nr_events, min);
- if (ret <= 0)
- break;
- ret = 0;
- } while (min && !nr_events && !need_resched());
+ if (list_empty(&ctx->iopoll_list))
+ break;
+ }
+ ret = io_do_iopoll(ctx, &nr_events, min);
+ } while (!ret && nr_events < min && !need_resched());
+out:
mutex_unlock(&ctx->uring_lock);
return ret;
}
return true;
}
#else
+static bool io_resubmit_prep(struct io_kiocb *req)
+{
+ return false;
+}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
return false;
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
if (unlikely(res != req->result)) {
- bool fail = true;
-
-#ifdef CONFIG_BLOCK
- if (res == -EAGAIN && io_rw_should_reissue(req) &&
- io_resubmit_prep(req))
- fail = false;
-#endif
- if (fail) {
+ if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
+ io_resubmit_prep(req))) {
req_set_fail_links(req);
req->flags |= REQ_F_DONT_REISSUE;
}
/*
* After the iocb has been issued, it's safe to be found on the poll list.
* Adding the kiocb to the list AFTER submission ensures that we don't
- * find it from a io_iopoll_getevents() thread before the issuer is done
+ * find it from an io_do_iopoll() thread before the issuer is done
* accessing the kiocb cookie.
*/
static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
- io_req_complete(req, ret);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
if (ret < 0)
req_set_fail_links(req);
- io_req_complete(req, ret);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
struct files_struct *files = current->files;
struct io_close *close = &req->close;
struct fdtable *fdt;
- struct file *file;
- int ret;
+ struct file *file = NULL;
+ int ret = -EBADF;
- file = NULL;
- ret = -EBADF;
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (close->fd >= fdt->max_fds) {
goto err;
}
file = fdt->fd[close->fd];
- if (!file) {
- spin_unlock(&files->file_lock);
- goto err;
- }
-
- if (file->f_op == &io_uring_fops) {
+ if (!file || file->f_op == &io_uring_fops) {
spin_unlock(&files->file_lock);
file = NULL;
goto err;
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- struct compat_msghdr __user *msg_compat;
struct io_sr_msg *sr = &req->sr_msg;
struct compat_iovec __user *uiov;
compat_uptr_t ptr;
compat_size_t len;
int ret;
- msg_compat = (struct compat_msghdr __user *) sr->umsg;
- ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
- &ptr, &len);
+ ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
+ &ptr, &len);
if (ret)
return ret;
}
if (req->poll.events & EPOLLONESHOT)
flags = 0;
- if (!__io_cqring_fill_event(req, error, flags)) {
+ if (!io_cqring_fill_event(req, error, flags)) {
io_poll_remove_waitqs(req);
req->poll.done = true;
flags = 0;
poll->head = NULL;
poll->done = false;
poll->canceled = false;
- poll->update_events = poll->update_user_data = false;
#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
/* mask in events that we always want/need */
poll->events = events | IO_POLL_UNMASK;
pt->error = -EINVAL;
return;
}
+ /*
+ * Can't handle multishot for double wait for now, turn it
+ * into one-shot mode.
+ */
+ if (!(req->poll.events & EPOLLONESHOT))
+ req->poll.events |= EPOLLONESHOT;
/* double add on the same waitqueue head, ignore */
if (poll->head == head)
return;
bool do_complete;
io_poll_remove_double(req);
+ do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
- if (req->opcode == IORING_OP_POLL_ADD) {
- do_complete = __io_poll_remove_one(req, &req->poll, true);
- } else {
+ if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
struct async_poll *apoll = req->apoll;
/* non-poll requests have submit ref still */
- do_complete = __io_poll_remove_one(req, &apoll->poll, true);
- if (do_complete) {
- req_ref_put(req);
- kfree(apoll->double_poll);
- kfree(apoll);
- }
+ req_ref_put(req);
+ kfree(apoll->double_poll);
+ kfree(apoll);
}
-
return do_complete;
}
do_complete = io_poll_remove_waitqs(req);
if (do_complete) {
- io_cqring_fill_event(req, -ECANCELED);
+ io_cqring_fill_event(req, -ECANCELED, 0);
io_commit_cqring(req->ctx);
req_set_fail_links(req);
io_put_req_deferred(req, 1);
return posted != 0;
}
-static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr)
+static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
+ bool poll_only)
__must_hold(&ctx->completion_lock)
{
struct hlist_head *list;
hlist_for_each_entry(req, list, hash_node) {
if (sqe_addr != req->user_data)
continue;
+ if (poll_only && req->opcode != IORING_OP_POLL_ADD)
+ continue;
return req;
}
-
return NULL;
}
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
+ bool poll_only)
__must_hold(&ctx->completion_lock)
{
struct io_kiocb *req;
- req = io_poll_find(ctx, sqe_addr);
+ req = io_poll_find(ctx, sqe_addr, poll_only);
if (!req)
return -ENOENT;
if (io_poll_remove_one(req))
return -EALREADY;
}
-static int io_poll_remove_prep(struct io_kiocb *req,
+static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
+ unsigned int flags)
+{
+ u32 events;
+
+ events = READ_ONCE(sqe->poll32_events);
+#ifdef __BIG_ENDIAN
+ events = swahw32(events);
+#endif
+ if (!(flags & IORING_POLL_ADD_MULTI))
+ events |= EPOLLONESHOT;
+ return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
+}
+
+static int io_poll_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
+ struct io_poll_update *upd = &req->poll_update;
+ u32 flags;
+
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
- sqe->poll_events)
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->len);
+ if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
+ IORING_POLL_ADD_MULTI))
+ return -EINVAL;
+ /* meaningless without update */
+ if (flags == IORING_POLL_ADD_MULTI)
return -EINVAL;
- req->poll_remove.addr = READ_ONCE(sqe->addr);
- return 0;
-}
-
-/*
- * Find a running poll command that matches one specified in sqe->addr,
- * and remove it if found.
- */
-static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- int ret;
+ upd->old_user_data = READ_ONCE(sqe->addr);
+ upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
+ upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
- spin_lock_irq(&ctx->completion_lock);
- ret = io_poll_cancel(ctx, req->poll_remove.addr);
- spin_unlock_irq(&ctx->completion_lock);
+ upd->new_user_data = READ_ONCE(sqe->off);
+ if (!upd->update_user_data && upd->new_user_data)
+ return -EINVAL;
+ if (upd->update_events)
+ upd->events = io_poll_parse_events(sqe, flags);
+ else if (sqe->poll32_events)
+ return -EINVAL;
- if (ret < 0)
- req_set_fail_links(req);
- io_req_complete(req, ret);
return 0;
}
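From the submission side, the update still uses the IORING_OP_POLL_REMOVE opcode; the prep above expects the target's old user_data in sqe->addr, the replacement user_data in sqe->off, the replacement mask in sqe->poll32_events, and the IORING_POLL_UPDATE_* flags in sqe->len. A hedged userspace sketch (the helper name is made up; field names are those of struct io_uring_sqe):

#include <string.h>
#include <linux/io_uring.h>

/* illustrative only: rewrite an armed poll's mask and user_data */
static void example_prep_poll_update(struct io_uring_sqe *sqe,
				     __u64 old_udata, __u64 new_udata,
				     unsigned new_poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;	/* doubles as poll update */
	sqe->addr = old_udata;			/* which poll request to find */
	sqe->off = new_udata;			/* its new user_data */
	sqe->poll32_events = new_poll_mask;	/* its new event mask */
	sqe->len = IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA;
}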
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_poll_iocb *poll = &req->poll;
- u32 events, flags;
+ u32 flags;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->ioprio || sqe->buf_index)
+ if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
return -EINVAL;
flags = READ_ONCE(sqe->len);
- if (flags & ~(IORING_POLL_ADD_MULTI | IORING_POLL_UPDATE_EVENTS |
- IORING_POLL_UPDATE_USER_DATA))
+ if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
- events = READ_ONCE(sqe->poll32_events);
-#ifdef __BIG_ENDIAN
- events = swahw32(events);
-#endif
- if (!(flags & IORING_POLL_ADD_MULTI))
- events |= EPOLLONESHOT;
- poll->update_events = poll->update_user_data = false;
- if (flags & IORING_POLL_UPDATE_EVENTS) {
- poll->update_events = true;
- poll->old_user_data = READ_ONCE(sqe->addr);
- }
- if (flags & IORING_POLL_UPDATE_USER_DATA) {
- poll->update_user_data = true;
- poll->new_user_data = READ_ONCE(sqe->off);
- }
- if (!(poll->update_events || poll->update_user_data) &&
- (sqe->off || sqe->addr))
- return -EINVAL;
- poll->events = demangle_poll(events) |
- (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
+
+ poll->events = io_poll_parse_events(sqe, flags);
return 0;
}
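With the update fields gone, io_poll_add_prep() only accepts IORING_POLL_ADD_MULTI in sqe->len and takes the mask from sqe->poll32_events. A hedged sketch of arming a multishot poll from userspace (helper name made up; on big-endian hosts the mask additionally needs the halfword swap that io_poll_parse_events() undoes):

#include <string.h>
#include <poll.h>
#include <linux/io_uring.h>

/* illustrative only: poll fd for reads, staying armed across CQEs */
static void example_prep_poll_multishot(struct io_uring_sqe *sqe, int fd)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
	sqe->poll32_events = POLLIN;		/* classic poll(2) mask */
	sqe->len = IORING_POLL_ADD_MULTI;	/* don't disarm after first CQE */
}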
-static int __io_poll_add(struct io_kiocb *req)
+static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
return ipt.error;
}
-static int io_poll_update(struct io_kiocb *req)
+static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *preq;
int ret;
spin_lock_irq(&ctx->completion_lock);
- preq = io_poll_find(ctx, req->poll.old_user_data);
+ preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
if (!preq) {
ret = -ENOENT;
goto err;
- } else if (preq->opcode != IORING_OP_POLL_ADD) {
- /* don't allow internal poll updates */
- ret = -EACCES;
+ }
+
+ if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
+ completing = true;
+ ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
goto err;
}
return 0;
}
/* only mask one event flags, keep behavior flags */
- if (req->poll.update_events) {
+ if (req->poll_update.update_events) {
preq->poll.events &= ~0xffff;
- preq->poll.events |= req->poll.events & 0xffff;
+ preq->poll.events |= req->poll_update.events & 0xffff;
preq->poll.events |= IO_POLL_UNMASK;
}
- if (req->poll.update_user_data)
- preq->user_data = req->poll.new_user_data;
-
+ if (req->poll_update.update_user_data)
+ preq->user_data = req->poll_update.new_user_data;
spin_unlock_irq(&ctx->completion_lock);
/* complete update request, we're done with it */
io_req_complete(req, ret);
if (!completing) {
- ret = __io_poll_add(preq);
+ ret = io_poll_add(preq, issue_flags);
if (ret < 0) {
req_set_fail_links(preq);
io_req_complete(preq, ret);
return 0;
}
-static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
-{
- if (!req->poll.update_events && !req->poll.update_user_data)
- return __io_poll_add(req);
- return io_poll_update(req);
-}
-
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_timeout_data *data = container_of(timer,
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- io_cqring_fill_event(req, -ETIME);
+ io_cqring_fill_event(req, -ETIME, 0);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
{
struct io_timeout_data *io;
struct io_kiocb *req;
- int ret = -ENOENT;
+ bool found = false;
list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
- if (user_data == req->user_data) {
- ret = 0;
+ found = user_data == req->user_data;
+ if (found)
break;
- }
}
-
- if (ret == -ENOENT)
- return ERR_PTR(ret);
+ if (!found)
+ return ERR_PTR(-ENOENT);
io = req->async_data;
- ret = hrtimer_try_to_cancel(&io->timer);
- if (ret == -1)
+ if (hrtimer_try_to_cancel(&io->timer) == -1)
return ERR_PTR(-EALREADY);
list_del_init(&req->timeout.list);
return req;
return PTR_ERR(req);
req_set_fail_links(req);
- io_cqring_fill_event(req, -ECANCELED);
+ io_cqring_fill_event(req, -ECANCELED, 0);
io_put_req_deferred(req, 1);
return 0;
}
ret = io_timeout_update(ctx, tr->addr, &tr->ts,
io_translate_timeout_mode(tr->flags));
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
ret = io_timeout_cancel(ctx, sqe_addr);
if (ret != -ENOENT)
goto done;
- ret = io_poll_cancel(ctx, sqe_addr);
+ ret = io_poll_cancel(ctx, sqe_addr, false);
done:
if (!ret)
ret = success_ret;
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
ret = io_timeout_cancel(ctx, sqe_addr);
if (ret != -ENOENT)
goto done;
- ret = io_poll_cancel(ctx, sqe_addr);
+ ret = io_poll_cancel(ctx, sqe_addr, false);
if (ret != -ENOENT)
goto done;
spin_unlock_irq(&ctx->completion_lock);
spin_lock_irq(&ctx->completion_lock);
done:
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
case IORING_OP_POLL_ADD:
return io_poll_add_prep(req, sqe);
case IORING_OP_POLL_REMOVE:
- return io_poll_remove_prep(req, sqe);
+ return io_poll_update_prep(req, sqe);
case IORING_OP_FSYNC:
return io_fsync_prep(req, sqe);
case IORING_OP_SYNC_FILE_RANGE:
ret = io_poll_add(req, issue_flags);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove(req, issue_flags);
+ ret = io_poll_update(req, issue_flags);
break;
case IORING_OP_SYNC_FILE_RANGE:
ret = io_sync_file_range(req, issue_flags);
#endif
#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
-static inline struct io_fixed_file *io_fixed_file_slot(struct io_rsrc_data *file_data,
+static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
unsigned i)
{
- struct fixed_rsrc_table *table;
+ struct io_fixed_file *table_l2;
- table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
- return &table->files[i & IORING_FILE_TABLE_MASK];
+ table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
+ return &table_l2[i & IORING_FILE_TABLE_MASK];
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
int index)
{
- struct io_fixed_file *slot = io_fixed_file_slot(ctx->file_data, index);
+ struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
return (struct file *) (slot->file_ptr & FFS_MASK);
}
if (unlikely((unsigned int)fd >= ctx->nr_user_files))
return NULL;
fd = array_index_nospec(fd, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(ctx->file_data, fd)->file_ptr;
+ file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
file = (struct file *) (file_ptr & FFS_MASK);
file_ptr &= ~FFS_MASK;
/* mask in overlapping REQ_F and FFS bits */
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
-static void io_free_file_tables(struct io_rsrc_data *data, unsigned nr_files)
+static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
{
unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
- kfree(data->table[i].files);
- kfree(data->table);
- data->table = NULL;
+ kfree(table->files[i]);
+ kfree(table->files);
+ table->files = NULL;
}
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
fput(file);
}
#endif
-}
-
-static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
-{
- struct io_rsrc_data *data = container_of(ref, struct io_rsrc_data, refs);
-
- complete(&data->done);
+ io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
+ kfree(ctx->file_data);
+ ctx->file_data = NULL;
+ ctx->nr_user_files = 0;
}
static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
io_rsrc_ref_unlock(ctx);
- percpu_ref_get(&data_to_kill->refs);
+ atomic_inc(&data_to_kill->refs);
percpu_ref_kill(&rsrc_node->refs);
ctx->rsrc_node = NULL;
}
break;
io_rsrc_node_switch(ctx, data);
- percpu_ref_kill(&data->refs);
+ /* kill initial ref, already quiesced if zero */
+ if (atomic_dec_and_test(&data->refs))
+ break;
flush_delayed_work(&ctx->rsrc_put_work);
-
ret = wait_for_completion_interruptible(&data->done);
if (!ret)
break;
- percpu_ref_resurrect(&data->refs);
+ atomic_inc(&data->refs);
+ /* wait for all works potentially completing data->done */
+ flush_delayed_work(&ctx->rsrc_put_work);
reinit_completion(&data->done);
mutex_unlock(&ctx->uring_lock);
if (!data)
return NULL;
- if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
- kfree(data);
- return NULL;
- }
+ atomic_set(&data->refs, 1);
data->ctx = ctx;
data->do_put = do_put;
init_completion(&data->done);
return data;
}
-static void io_rsrc_data_free(struct io_rsrc_data *data)
-{
- percpu_ref_exit(&data->refs);
- kfree(data);
-}
-
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- struct io_rsrc_data *data = ctx->file_data;
int ret;
- if (!data)
+ if (!ctx->file_data)
return -ENXIO;
- ret = io_rsrc_ref_quiesce(data, ctx);
- if (ret)
- return ret;
-
- __io_sqe_files_unregister(ctx);
- io_free_file_tables(data, ctx->nr_user_files);
- io_rsrc_data_free(data);
- ctx->file_data = NULL;
- ctx->nr_user_files = 0;
- return 0;
+ ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
+ if (!ret)
+ __io_sqe_files_unregister(ctx);
+ return ret;
}
static void io_sq_thread_unpark(struct io_sq_data *sqd)
static void io_sq_thread_stop(struct io_sq_data *sqd)
{
WARN_ON_ONCE(sqd->thread == current);
+ WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
- mutex_lock(&sqd->lock);
set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ mutex_lock(&sqd->lock);
if (sqd->thread)
wake_up_process(sqd->thread);
mutex_unlock(&sqd->lock);
}
#endif
-static bool io_alloc_file_tables(struct io_rsrc_data *file_data,
- unsigned nr_files)
+static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
{
unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE);
- file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
- GFP_KERNEL);
- if (!file_data->table)
+ table->files = kcalloc(nr_tables, sizeof(*table->files), GFP_KERNEL);
+ if (!table->files)
return false;
for (i = 0; i < nr_tables; i++) {
- struct fixed_rsrc_table *table = &file_data->table[i];
unsigned int this_files = min(nr_files, IORING_MAX_FILES_TABLE);
- table->files = kcalloc(this_files, sizeof(struct file *),
+ table->files[i] = kcalloc(this_files, sizeof(*table->files[i]),
GFP_KERNEL);
- if (!table->files)
+ if (!table->files[i])
break;
nr_files -= this_files;
}
if (i == nr_tables)
return true;
- io_free_file_tables(file_data, nr_tables * IORING_MAX_FILES_TABLE);
+ io_free_file_tables(table, nr_tables * IORING_MAX_FILES_TABLE);
return false;
}
}
io_rsrc_node_destroy(ref_node);
- percpu_ref_put(&rsrc_data->refs);
+ if (atomic_dec_and_test(&rsrc_data->refs))
+ complete(&rsrc_data->done);
}
static void io_rsrc_put_work(struct work_struct *work)
static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
- struct io_rsrc_data *data = node->rsrc_data;
- struct io_ring_ctx *ctx = data->ctx;
+ struct io_ring_ctx *ctx = node->rsrc_data->ctx;
bool first_add = false;
- int delay;
io_rsrc_ref_lock(ctx);
node->done = true;
}
io_rsrc_ref_unlock(ctx);
- delay = percpu_ref_is_dying(&data->refs) ? 0 : HZ;
- if (first_add || !delay)
- mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
+ if (first_add)
+ mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
}
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
if (!file_data)
return -ENOMEM;
ctx->file_data = file_data;
-
ret = -ENOMEM;
- if (!io_alloc_file_tables(file_data, nr_args))
+ if (!io_alloc_file_tables(&ctx->file_table, nr_args))
goto out_free;
for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
fput(file);
goto out_fput;
}
- io_fixed_file_set(io_fixed_file_slot(file_data, i), file);
+ io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
}
ret = io_sqe_files_scm(ctx);
if (ret) {
- io_sqe_files_unregister(ctx);
+ __io_sqe_files_unregister(ctx);
return ret;
}
if (file)
fput(file);
}
- io_free_file_tables(file_data, nr_args);
+ io_free_file_tables(&ctx->file_table, nr_args);
ctx->nr_user_files = 0;
out_free:
- io_rsrc_data_free(ctx->file_data);
+ kfree(ctx->file_data);
ctx->file_data = NULL;
return ret;
}
continue;
i = array_index_nospec(up->offset + done, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(ctx->file_data, i);
+ file_slot = io_fixed_file_slot(&ctx->file_table, i);
if (file_slot->file_ptr) {
file = (struct file *)(file_slot->file_ptr & FFS_MASK);
return off;
}
+static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
+{
+ unsigned int i;
+
+ for (i = 0; i < imu->nr_bvecs; i++)
+ unpin_user_page(imu->bvec[i].bv_page);
+ if (imu->acct_pages)
+ io_unaccount_mem(ctx, imu->acct_pages);
+ kvfree(imu->bvec);
+ imu->nr_bvecs = 0;
+}
+
static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
- int i, j;
+ unsigned int i;
if (!ctx->user_bufs)
return -ENXIO;
- for (i = 0; i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
-
- for (j = 0; j < imu->nr_bvecs; j++)
- unpin_user_page(imu->bvec[j].bv_page);
-
- if (imu->acct_pages)
- io_unaccount_mem(ctx, imu->acct_pages);
- kvfree(imu->bvec);
- imu->nr_bvecs = 0;
- }
-
+ for (i = 0; i < ctx->nr_user_bufs; i++)
+ io_buffer_unmap(ctx, &ctx->user_bufs[i]);
kfree(ctx->user_bufs);
ctx->user_bufs = NULL;
ctx->nr_user_bufs = 0;
static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
- if (ctx->user_bufs)
- return -EBUSY;
- if (!nr_args || nr_args > UIO_MAXIOV)
- return -EINVAL;
-
- ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
- GFP_KERNEL);
- if (!ctx->user_bufs)
- return -ENOMEM;
-
- return 0;
+ ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
+ return ctx->user_bufs ? 0 : -ENOMEM;
}
static int io_buffer_validate(struct iovec *iov)
struct iovec iov;
struct page *last_hpage = NULL;
+ if (ctx->user_bufs)
+ return -EBUSY;
+ if (!nr_args || nr_args > UIO_MAXIOV)
+ return -EINVAL;
ret = io_buffers_map_alloc(ctx, nr_args);
if (ret)
return ret;
- for (i = 0; i < nr_args; i++) {
+ for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
ret = io_copy_iov(ctx, &iov, arg, i);
if (ret)
break;
-
ret = io_buffer_validate(&iov);
if (ret)
break;
-
ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
if (ret)
break;
-
- ctx->nr_user_bufs++;
}
if (ret)
}
mutex_lock(&ctx->uring_lock);
- io_sqe_files_unregister(ctx);
+ if (ctx->file_data) {
+ if (!atomic_dec_and_test(&ctx->file_data->refs))
+ wait_for_completion(&ctx->file_data->done);
+ __io_sqe_files_unregister(ctx);
+ }
if (ctx->rings)
__io_cqring_overflow_flush(ctx, true);
mutex_unlock(&ctx->uring_lock);
WARN_ON_ONCE(time_after(jiffies, timeout));
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+ init_completion(&exit.completion);
+ init_task_work(&exit.task_work, io_tctx_exit_cb);
+ exit.ctx = ctx;
/*
* Some may use context even when all refs and requests have been put,
* and they are free to do so while still holding uring_lock or
node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
ctx_node);
- exit.ctx = ctx;
- init_completion(&exit.completion);
- init_task_work(&exit.task_work, io_tctx_exit_cb);
+ /* don't spin on a single task if cancellation failed */
+ list_rotate_left(&ctx->tctx_list);
ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
if (WARN_ON_ONCE(ret))
continue;
mutex_unlock(&ctx->uring_lock);
wait_for_completion(&exit.completion);
- cond_resched();
mutex_lock(&ctx->uring_lock);
}
mutex_unlock(&ctx->uring_lock);
if (ret < 0)
break;
} while (1);
-
mutex_lock(&ctx->uring_lock);
if (ret) {
- percpu_ref_resurrect(&ctx->refs);
- goto out_quiesce;
+ io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+ return ret;
}
}
if (io_register_op_must_quiesce(opcode)) {
/* bring the ctx back to life */
percpu_ref_reinit(&ctx->refs);
-out_quiesce:
reinit_completion(&ctx->ref_comp);
}
return ret;