#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
+#include <linux/blk-cgroup.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
size_t len;
struct bio_vec *bvec;
unsigned int nr_bvecs;
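+ /* pages accounted (pinned) for this buffer; a compound page is counted once */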
+ unsigned long acct_pages;
};
struct fixed_file_table {
/* Only used for accounting purposes */
struct mm_struct *mm_account;
+#ifdef CONFIG_BLK_CGROUP
+ struct cgroup_subsys_state *sqo_blkcg_css;
+#endif
+
struct io_sq_data *sq_data; /* if using sq thread polling */
struct wait_queue_head sqo_sq_wait;
struct io_timeout {
struct file *file;
- u64 addr;
- int flags;
u32 off;
u32 target_seq;
struct list_head list;
};
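+/* state for IORING_OP_TIMEOUT_REMOVE: addr holds the user_data of the timeout to cancel */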
+struct io_timeout_rem {
+ struct file *file;
+ u64 addr;
+};
+
struct io_rw {
/* NOTE: kiocb has the file as the first member, so don't do it here */
struct kiocb kiocb;
struct io_sync sync;
struct io_cancel cancel;
struct io_timeout timeout;
+ struct io_timeout_rem timeout_rem;
struct io_connect connect;
struct io_sr_msg sr_msg;
struct io_open open;
unsigned needs_fsize : 1;
/* must always have async data allocated */
unsigned needs_async_data : 1;
+ /* needs blkcg context, issues async io potentially */
+ unsigned needs_blkcg : 1;
/* size of async data needed, if any */
unsigned short async_size;
};
.pollin = 1,
.buffer_select = 1,
.needs_async_data = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_WRITEV] = {
.pollout = 1,
.needs_fsize = 1,
.needs_async_data = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_READ_FIXED] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_WRITE_FIXED] = {
.unbound_nonreg_file = 1,
.pollout = 1,
.needs_fsize = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_POLL_ADD] = {
[IORING_OP_POLL_REMOVE] = {},
[IORING_OP_SYNC_FILE_RANGE] = {
.needs_file = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_SENDMSG] = {
.needs_mm = 1,
.needs_fs = 1,
.pollout = 1,
.needs_async_data = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_msghdr),
},
[IORING_OP_RECVMSG] = {
.pollin = 1,
.buffer_select = 1,
.needs_async_data = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_msghdr),
},
[IORING_OP_TIMEOUT] = {
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
.needs_fsize = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_OPENAT] = {
.file_table = 1,
.needs_fs = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_CLOSE] = {
.needs_file = 1,
.needs_file_no_error = 1,
.file_table = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_FILES_UPDATE] = {
.needs_mm = 1,
.needs_mm = 1,
.needs_fs = 1,
.file_table = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_READ] = {
.needs_mm = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
.buffer_select = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_WRITE] = {
.unbound_nonreg_file = 1,
.pollout = 1,
.needs_fsize = 1,
+ .needs_blkcg = 1,
.async_size = sizeof(struct io_async_rw),
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_MADVISE] = {
.needs_mm = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_SEND] = {
.needs_mm = 1,
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_RECV] = {
.needs_mm = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
.buffer_select = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_OPENAT2] = {
.file_table = 1,
.needs_fs = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
.needs_file = 1,
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
+ .needs_blkcg = 1,
},
[IORING_OP_PROVIDE_BUFFERS] = {},
[IORING_OP_REMOVE_BUFFERS] = {},
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_files_update *ip,
unsigned nr_args);
-static int io_prep_work_files(struct io_kiocb *req);
static void __io_clean_op(struct io_kiocb *req);
-static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
- int fd, struct file **out_file, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req,
- const struct io_uring_sqe *sqe,
- struct io_comp_state *cs);
+static struct file *io_file_get(struct io_submit_state *state,
+ struct io_kiocb *req, int fd, bool fixed);
+static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
static void io_file_put_work(struct work_struct *work);
static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
return __io_sq_thread_acquire_mm(ctx);
}
+static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
+ struct cgroup_subsys_state **cur_css)
+
+{
+#ifdef CONFIG_BLK_CGROUP
+ /* puts the old one when swapping */
+ if (*cur_css != ctx->sqo_blkcg_css) {
+ kthread_associate_blkcg(ctx->sqo_blkcg_css);
+ *cur_css = ctx->sqo_blkcg_css;
+ }
+#endif
+}
+
+static void io_sq_thread_unassociate_blkcg(void)
+{
+#ifdef CONFIG_BLK_CGROUP
+ kthread_associate_blkcg(NULL);
+#endif
+}
+
static inline void req_set_fail_links(struct io_kiocb *req)
{
if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
mmdrop(req->work.mm);
req->work.mm = NULL;
}
+#ifdef CONFIG_BLK_CGROUP
+ if (req->work.blkcg_css)
+ css_put(req->work.blkcg_css);
+#endif
if (req->work.creds) {
put_cred(req->work.creds);
req->work.creds = NULL;
static void io_prep_async_work(struct io_kiocb *req)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
+ struct io_ring_ctx *ctx = req->ctx;
io_req_init_async(req);
if (req->flags & REQ_F_ISREG) {
- if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
+ if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
io_wq_hash_work(&req->work, file_inode(req->file));
} else {
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
+ if (!req->work.files && io_op_defs[req->opcode].file_table &&
+ !(req->flags & REQ_F_NO_FILE_TABLE)) {
+ req->work.files = get_files_struct(current);
+ get_nsproxy(current->nsproxy);
+ req->work.nsproxy = current->nsproxy;
+ req->flags |= REQ_F_INFLIGHT;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
if (!req->work.mm && def->needs_mm) {
mmgrab(current->mm);
req->work.mm = current->mm;
}
+#ifdef CONFIG_BLK_CGROUP
+ if (!req->work.blkcg_css && def->needs_blkcg) {
+ rcu_read_lock();
+ req->work.blkcg_css = blkcg_css();
+ /*
+ * This should be rare, either the cgroup is dying or the task
+ * is moving cgroups. Just punt to root for the handful of ios.
+ */
+ if (!css_tryget_online(req->work.blkcg_css))
+ req->work.blkcg_css = NULL;
+ rcu_read_unlock();
+ }
+#endif
if (!req->work.creds)
req->work.creds = get_current_cred();
if (!req->work.fs && def->needs_fs) {
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
- gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
- struct io_kiocb *req;
-
if (!state->free_reqs) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
size_t sz;
int ret;
goto fallback;
ret = 1;
}
- state->free_reqs = ret - 1;
- req = state->reqs[ret - 1];
- } else {
- state->free_reqs--;
- req = state->reqs[state->free_reqs];
+ state->free_reqs = ret;
}
- return req;
+ state->free_reqs--;
+ return state->reqs[state->free_reqs];
fallback:
return io_get_fallback_req(ctx);
}
return __io_req_find_next(req);
}
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
- bool twa_signal_ok)
+static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
notify = TWA_SIGNAL;
- ret = task_work_add(tsk, cb, notify);
+ ret = task_work_add(tsk, &req->task_work, notify);
if (!ret)
wake_up_process(tsk);
if (!__io_sq_thread_acquire_mm(ctx)) {
mutex_lock(&ctx->uring_lock);
- __io_queue_sqe(req, NULL, NULL);
+ __io_queue_sqe(req, NULL);
mutex_unlock(&ctx->uring_lock);
} else {
__io_req_task_cancel(req, -EFAULT);
init_task_work(&req->task_work, io_req_task_submit);
percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, &req->task_work, true);
+ ret = io_req_task_work_add(req, true);
if (unlikely(ret)) {
struct task_struct *tsk;
if (state->file) {
if (state->fd == fd) {
state->has_refs--;
- state->ios_left--;
return state->file;
}
__io_state_file_put(state);
return NULL;
state->fd = fd;
- state->ios_left--;
- state->has_refs = state->ios_left;
+ state->has_refs = state->ios_left - 1;
return state->file;
}
return file->f_op->write_iter != NULL;
}
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
if (kiocb->ki_flags & IOCB_NOWAIT)
req->flags |= REQ_F_NOWAIT;
- if (force_nonblock)
- kiocb->ki_flags |= IOCB_NOWAIT;
-
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!(kiocb->ki_flags & IOCB_DIRECT) ||
!kiocb->ki_filp->f_op->iopoll)
static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
- return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
+ return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}
/*
return 0;
}
-static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
- bool force_nonblock)
+static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
struct io_async_rw *iorw = req->async_data;
struct iovec *iov = iorw->fast_iov;
ssize_t ret;
- ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
if (unlikely(ret < 0))
return ret;
return 0;
}
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
ssize_t ret;
- ret = io_prep_rw(req, sqe, force_nonblock);
+ ret = io_prep_rw(req, sqe);
if (ret)
return ret;
return -EBADF;
/* either don't need iovec imported or already have it */
- if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
+ if (!req->async_data)
return 0;
- return io_rw_prep_async(req, READ, force_nonblock);
+ return io_rw_prep_async(req, READ);
}
/*
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
- ret = io_req_task_work_add(req, &req->task_work, true);
+ ret = io_req_task_work_add(req, true);
if (unlikely(ret)) {
struct task_struct *tsk;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
+ else
+ kiocb->ki_flags |= IOCB_NOWAIT;
+
/* If the file doesn't support async, just async punt */
no_async = force_nonblock && !io_file_supports_async(req->file, READ);
return ret;
}
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
ssize_t ret;
- ret = io_prep_rw(req, sqe, force_nonblock);
+ ret = io_prep_rw(req, sqe);
if (ret)
return ret;
return -EBADF;
/* either don't need iovec imported or already have it */
- if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
+ if (!req->async_data)
return 0;
- return io_rw_prep_async(req, WRITE, force_nonblock);
+ return io_rw_prep_async(req, WRITE);
}
static int io_write(struct io_kiocb *req, bool force_nonblock,
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
- req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
+ kiocb->ki_flags &= ~IOCB_NOWAIT;
+ else
+ kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
{
struct io_splice *sp = &req->splice;
unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
- int ret;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(sp->flags & ~valid_flags))
return -EINVAL;
- ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
- (sp->flags & SPLICE_F_FD_IN_FIXED));
- if (ret)
- return ret;
+ sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
+ (sp->flags & SPLICE_F_FD_IN_FIXED));
+ if (!sp->file_in)
+ return -EBADF;
req->flags |= REQ_F_NEED_CLEANUP;
if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
mode = READ_ONCE(sqe->len);
flags = READ_ONCE(sqe->open_flags);
req->open.how = build_open_how(flags, mode);
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
len = READ_ONCE(sqe->len);
if (len < OPEN_HOW_SIZE_VER0)
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- /* iovec is already imported */
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
-
ret = io_sendmsg_copy_hdr(req, async_msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
if (unlikely(ret))
- return ret;;
+ return ret;
msg.msg_name = NULL;
msg.msg_control = NULL;
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- /* iovec is already imported */
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
-
ret = io_recvmsg_copy_hdr(req, async_msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
* of executing it. We can't safely execute it anyway, as we may not
* have the needed state needed for it anyway.
*/
- ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
+ ret = io_req_task_work_add(req, twa_signal_ok);
if (unlikely(ret)) {
struct task_struct *tsk;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
+ list_del_init(&req->timeout.list);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- /*
- * We could be racing with timeout deletion. If the list is empty,
- * then timeout lookup already found it and will be handling it.
- */
- if (!list_empty(&req->timeout.list))
- list_del_init(&req->timeout.list);
-
io_cqring_fill_event(req, -ETIME);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
struct io_timeout_data *io = req->async_data;
int ret;
- list_del_init(&req->timeout.list);
-
ret = hrtimer_try_to_cancel(&io->timer);
if (ret == -1)
return -EALREADY;
+ list_del_init(&req->timeout.list);
req_set_fail_links(req);
req->flags |= REQ_F_COMP_LOCKED;
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->len)
- return -EINVAL;
-
- req->timeout.addr = READ_ONCE(sqe->addr);
- req->timeout.flags = READ_ONCE(sqe->timeout_flags);
- if (req->timeout.flags)
+ if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
return -EINVAL;
+ req->timeout_rem.addr = READ_ONCE(sqe->addr);
return 0;
}
int ret;
spin_lock_irq(&ctx->completion_lock);
- ret = io_timeout_cancel(ctx, req->timeout.addr);
+ ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
return 0;
}
-static int io_req_defer_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- ssize_t ret = 0;
-
- if (!sqe)
- return 0;
-
- if (io_alloc_async_data(req))
- return -EAGAIN;
- ret = io_prep_work_files(req);
- if (unlikely(ret))
- return ret;
-
- io_prep_async_work(req);
-
switch (req->opcode) {
case IORING_OP_NOP:
- break;
+ return 0;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- ret = io_read_prep(req, sqe, true);
- break;
+ return io_read_prep(req, sqe);
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- ret = io_write_prep(req, sqe, true);
- break;
+ return io_write_prep(req, sqe);
case IORING_OP_POLL_ADD:
- ret = io_poll_add_prep(req, sqe);
- break;
+ return io_poll_add_prep(req, sqe);
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove_prep(req, sqe);
- break;
+ return io_poll_remove_prep(req, sqe);
case IORING_OP_FSYNC:
- ret = io_prep_fsync(req, sqe);
- break;
+ return io_prep_fsync(req, sqe);
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_prep_sfr(req, sqe);
- break;
+ return io_prep_sfr(req, sqe);
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
- ret = io_sendmsg_prep(req, sqe);
- break;
+ return io_sendmsg_prep(req, sqe);
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
- ret = io_recvmsg_prep(req, sqe);
- break;
+ return io_recvmsg_prep(req, sqe);
case IORING_OP_CONNECT:
- ret = io_connect_prep(req, sqe);
- break;
+ return io_connect_prep(req, sqe);
case IORING_OP_TIMEOUT:
- ret = io_timeout_prep(req, sqe, false);
- break;
+ return io_timeout_prep(req, sqe, false);
case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove_prep(req, sqe);
- break;
+ return io_timeout_remove_prep(req, sqe);
case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel_prep(req, sqe);
- break;
+ return io_async_cancel_prep(req, sqe);
case IORING_OP_LINK_TIMEOUT:
- ret = io_timeout_prep(req, sqe, true);
- break;
+ return io_timeout_prep(req, sqe, true);
case IORING_OP_ACCEPT:
- ret = io_accept_prep(req, sqe);
- break;
+ return io_accept_prep(req, sqe);
case IORING_OP_FALLOCATE:
- ret = io_fallocate_prep(req, sqe);
- break;
+ return io_fallocate_prep(req, sqe);
case IORING_OP_OPENAT:
- ret = io_openat_prep(req, sqe);
- break;
+ return io_openat_prep(req, sqe);
case IORING_OP_CLOSE:
- ret = io_close_prep(req, sqe);
- break;
+ return io_close_prep(req, sqe);
case IORING_OP_FILES_UPDATE:
- ret = io_files_update_prep(req, sqe);
- break;
+ return io_files_update_prep(req, sqe);
case IORING_OP_STATX:
- ret = io_statx_prep(req, sqe);
- break;
+ return io_statx_prep(req, sqe);
case IORING_OP_FADVISE:
- ret = io_fadvise_prep(req, sqe);
- break;
+ return io_fadvise_prep(req, sqe);
case IORING_OP_MADVISE:
- ret = io_madvise_prep(req, sqe);
- break;
+ return io_madvise_prep(req, sqe);
case IORING_OP_OPENAT2:
- ret = io_openat2_prep(req, sqe);
- break;
+ return io_openat2_prep(req, sqe);
case IORING_OP_EPOLL_CTL:
- ret = io_epoll_ctl_prep(req, sqe);
- break;
+ return io_epoll_ctl_prep(req, sqe);
case IORING_OP_SPLICE:
- ret = io_splice_prep(req, sqe);
- break;
+ return io_splice_prep(req, sqe);
case IORING_OP_PROVIDE_BUFFERS:
- ret = io_provide_buffers_prep(req, sqe);
- break;
+ return io_provide_buffers_prep(req, sqe);
case IORING_OP_REMOVE_BUFFERS:
- ret = io_remove_buffers_prep(req, sqe);
- break;
+ return io_remove_buffers_prep(req, sqe);
case IORING_OP_TEE:
- ret = io_tee_prep(req, sqe);
- break;
- default:
- printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
- req->opcode);
- ret = -EINVAL;
- break;
+ return io_tee_prep(req, sqe);
}
- return ret;
+ printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
+ req->opcode);
+ return -EINVAL;
+}
+
+static int io_req_defer_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (!sqe)
+ return 0;
+ if (io_alloc_async_data(req))
+ return -EAGAIN;
+ return io_req_prep(req, sqe);
}
static u32 io_get_sequence(struct io_kiocb *req)
io_req_drop_files(req);
}
-static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock, struct io_comp_state *cs)
+static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- if (sqe) {
- ret = io_read_prep(req, sqe, force_nonblock);
- if (ret < 0)
- break;
- }
ret = io_read(req, force_nonblock, cs);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- if (sqe) {
- ret = io_write_prep(req, sqe, force_nonblock);
- if (ret < 0)
- break;
- }
ret = io_write(req, force_nonblock, cs);
break;
case IORING_OP_FSYNC:
- if (sqe) {
- ret = io_prep_fsync(req, sqe);
- if (ret < 0)
- break;
- }
ret = io_fsync(req, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- if (sqe) {
- ret = io_poll_add_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_poll_add(req);
break;
case IORING_OP_POLL_REMOVE:
- if (sqe) {
- ret = io_poll_remove_prep(req, sqe);
- if (ret < 0)
- break;
- }
ret = io_poll_remove(req);
break;
case IORING_OP_SYNC_FILE_RANGE:
- if (sqe) {
- ret = io_prep_sfr(req, sqe);
- if (ret < 0)
- break;
- }
ret = io_sync_file_range(req, force_nonblock);
break;
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
- if (sqe) {
- ret = io_sendmsg_prep(req, sqe);
- if (ret < 0)
- break;
- }
if (req->opcode == IORING_OP_SENDMSG)
ret = io_sendmsg(req, force_nonblock, cs);
else
break;
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
- if (sqe) {
- ret = io_recvmsg_prep(req, sqe);
- if (ret)
- break;
- }
if (req->opcode == IORING_OP_RECVMSG)
ret = io_recvmsg(req, force_nonblock, cs);
else
ret = io_recv(req, force_nonblock, cs);
break;
case IORING_OP_TIMEOUT:
- if (sqe) {
- ret = io_timeout_prep(req, sqe, false);
- if (ret)
- break;
- }
ret = io_timeout(req);
break;
case IORING_OP_TIMEOUT_REMOVE:
- if (sqe) {
- ret = io_timeout_remove_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_timeout_remove(req);
break;
case IORING_OP_ACCEPT:
- if (sqe) {
- ret = io_accept_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_accept(req, force_nonblock, cs);
break;
case IORING_OP_CONNECT:
- if (sqe) {
- ret = io_connect_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_connect(req, force_nonblock, cs);
break;
case IORING_OP_ASYNC_CANCEL:
- if (sqe) {
- ret = io_async_cancel_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_async_cancel(req);
break;
case IORING_OP_FALLOCATE:
- if (sqe) {
- ret = io_fallocate_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_fallocate(req, force_nonblock);
break;
case IORING_OP_OPENAT:
- if (sqe) {
- ret = io_openat_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_openat(req, force_nonblock);
break;
case IORING_OP_CLOSE:
- if (sqe) {
- ret = io_close_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_close(req, force_nonblock, cs);
break;
case IORING_OP_FILES_UPDATE:
- if (sqe) {
- ret = io_files_update_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_files_update(req, force_nonblock, cs);
break;
case IORING_OP_STATX:
- if (sqe) {
- ret = io_statx_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_statx(req, force_nonblock);
break;
case IORING_OP_FADVISE:
- if (sqe) {
- ret = io_fadvise_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_fadvise(req, force_nonblock);
break;
case IORING_OP_MADVISE:
- if (sqe) {
- ret = io_madvise_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_madvise(req, force_nonblock);
break;
case IORING_OP_OPENAT2:
- if (sqe) {
- ret = io_openat2_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_openat2(req, force_nonblock);
break;
case IORING_OP_EPOLL_CTL:
- if (sqe) {
- ret = io_epoll_ctl_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_epoll_ctl(req, force_nonblock, cs);
break;
case IORING_OP_SPLICE:
- if (sqe) {
- ret = io_splice_prep(req, sqe);
- if (ret < 0)
- break;
- }
ret = io_splice(req, force_nonblock);
break;
case IORING_OP_PROVIDE_BUFFERS:
- if (sqe) {
- ret = io_provide_buffers_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_provide_buffers(req, force_nonblock, cs);
break;
case IORING_OP_REMOVE_BUFFERS:
- if (sqe) {
- ret = io_remove_buffers_prep(req, sqe);
- if (ret)
- break;
- }
ret = io_remove_buffers(req, force_nonblock, cs);
break;
case IORING_OP_TEE:
- if (sqe) {
- ret = io_tee_prep(req, sqe);
- if (ret < 0)
- break;
- }
ret = io_tee(req, force_nonblock);
break;
default:
if (!ret) {
do {
- ret = io_issue_sqe(req, NULL, false, NULL);
+ ret = io_issue_sqe(req, false, NULL);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
- int fd, struct file **out_file, bool fixed)
+static struct file *io_file_get(struct io_submit_state *state,
+ struct io_kiocb *req, int fd, bool fixed)
{
struct io_ring_ctx *ctx = req->ctx;
struct file *file;
if (fixed) {
- if (unlikely(!ctx->file_data ||
- (unsigned) fd >= ctx->nr_user_files))
- return -EBADF;
+ if (unlikely((unsigned int)fd >= ctx->nr_user_files))
+ return NULL;
fd = array_index_nospec(fd, ctx->nr_user_files);
file = io_file_from_index(ctx, fd);
if (file) {
file = __io_file_get(state, fd);
}
- if (file || io_op_defs[req->opcode].needs_file_no_error) {
- *out_file = file;
- return 0;
- }
- return -EBADF;
+ return file;
}
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
if (unlikely(!fixed && io_async_submit(req->ctx)))
return -EBADF;
- return io_file_get(state, req, fd, &req->file, fixed);
-}
-
-static int io_grab_files(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- io_req_init_async(req);
-
- if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
+ req->file = io_file_get(state, req, fd, fixed);
+ if (req->file || io_op_defs[req->opcode].needs_file_no_error)
return 0;
-
- req->work.files = get_files_struct(current);
- get_nsproxy(current->nsproxy);
- req->work.nsproxy = current->nsproxy;
- req->flags |= REQ_F_INFLIGHT;
-
- spin_lock_irq(&ctx->inflight_lock);
- list_add(&req->inflight_entry, &ctx->inflight_list);
- spin_unlock_irq(&ctx->inflight_lock);
- return 0;
-}
-
-static inline int io_prep_work_files(struct io_kiocb *req)
-{
- if (!io_op_defs[req->opcode].file_table)
- return 0;
- return io_grab_files(req);
+ return -EBADF;
}
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
return nxt;
}
-static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_comp_state *cs)
+static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
{
struct io_kiocb *linked_timeout;
struct io_kiocb *nxt;
old_creds = override_creds(req->work.creds);
}
- ret = io_issue_sqe(req, sqe, true, cs);
+ ret = io_issue_sqe(req, true, cs);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
if (!io_arm_poll_handler(req)) {
punt:
- ret = io_prep_work_files(req);
- if (unlikely(ret))
- goto err;
/*
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
}
if (unlikely(ret)) {
-err:
/* un-prep timeout, so it'll be killed as any other linked */
req->flags &= ~REQ_F_LINK_TIMEOUT;
req_set_fail_links(req);
req->work.flags |= IO_WQ_WORK_CONCURRENT;
io_queue_async_work(req);
} else {
- __io_queue_sqe(req, sqe, cs);
+ if (sqe) {
+ ret = io_req_prep(req, sqe);
+ if (unlikely(ret))
+ goto fail_req;
+ }
+ __io_queue_sqe(req, cs);
}
}
struct io_submit_state *state)
{
unsigned int sqe_flags;
- int id;
+ int id, ret;
req->opcode = READ_ONCE(sqe->opcode);
req->user_data = READ_ONCE(sqe->user_data);
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->task = current;
- get_task_struct(req->task);
- atomic_long_inc(&req->task->io_uring->req_issue);
req->result = 0;
if (unlikely(req->opcode >= IORING_OP_LAST))
if (!io_op_defs[req->opcode].needs_file)
return 0;
- return io_req_set_file(state, req, READ_ONCE(sqe->fd));
+ ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
+ state->ios_left--;
+ return ret;
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
if (!percpu_ref_tryget_many(&ctx->refs, nr))
return -EAGAIN;
+ atomic_long_add(nr, &current->io_uring->req_issue);
+ refcount_add(nr, &current->usage);
+
io_submit_state_start(&state, ctx, nr);
for (i = 0; i < nr; i++) {
int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
percpu_ref_put_many(&ctx->refs, nr - ref_used);
+ atomic_long_sub(nr - ref_used, &current->io_uring->req_issue);
+ put_task_struct_many(current, nr - ref_used);
}
if (link)
io_queue_link_head(link, &state.comp);
};
static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
- unsigned long start_jiffies)
+ unsigned long start_jiffies, bool cap_entries)
{
unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
struct io_sq_data *sqd = ctx->sq_data;
finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
io_ring_clear_wakeup_flag(ctx);
+ /* if we're handling multiple rings, cap submit size for fairness */
+ if (cap_entries && to_submit > 8)
+ to_submit = 8;
+
mutex_lock(&ctx->uring_lock);
if (likely(!percpu_ref_is_dying(&ctx->refs)))
ret = io_submit_sqes(ctx, to_submit);
static int io_sq_thread(void *data)
{
+ struct cgroup_subsys_state *cur_css = NULL;
const struct cred *old_cred = NULL;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
start_jiffies = jiffies;
while (!kthread_should_stop()) {
enum sq_ret ret = 0;
+ bool cap_entries;
/*
* Any changes to the sqd lists are synchronized through the
if (unlikely(!list_empty(&sqd->ctx_new_list)))
io_sqd_init_new(sqd);
+ cap_entries = !list_is_singular(&sqd->ctx_list);
+
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
if (current->cred != ctx->creds) {
if (old_cred)
revert_creds(old_cred);
old_cred = override_creds(ctx->creds);
}
+ io_sq_thread_associate_blkcg(ctx, &cur_css);
- ret |= __io_sq_thread(ctx, start_jiffies);
+ ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
io_sq_thread_drop_mm();
}
io_run_task_work();
+ if (cur_css)
+ io_sq_thread_unassociate_blkcg();
if (old_cred)
revert_creds(old_cred);
return autoremove_wake_function(curr, mode, wake_flags, key);
}
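+/*
+ * Returns 1 if task work was run (or the pending TASK_WORK jobctl state was
+ * consumed), 0 if no signal is pending, and -EINTR if a real signal needs
+ * handling by the caller.
+ */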
+static int io_run_task_work_sig(void)
+{
+ if (io_run_task_work())
+ return 1;
+ if (!signal_pending(current))
+ return 0;
+ if (current->jobctl & JOBCTL_TASK_WORK) {
+ spin_lock_irq(&current->sighand->siglock);
+ current->jobctl &= ~JOBCTL_TASK_WORK;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return 1;
+ }
+ return -EINTR;
+}
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
/* make sure we run task_work before checking for signals */
- if (io_run_task_work())
+ ret = io_run_task_work_sig();
+ if (ret > 0)
continue;
- if (signal_pending(current)) {
- if (current->jobctl & JOBCTL_TASK_WORK) {
- spin_lock_irq(&current->sighand->siglock);
- current->jobctl &= ~JOBCTL_TASK_WORK;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- continue;
- }
- ret = -EINTR;
+ else if (ret < 0)
break;
- }
if (io_should_wake(&iowq, false))
break;
schedule();
struct io_uring_task *tctx = tsk->io_uring;
WARN_ON_ONCE(!xa_empty(&tctx->xa));
- xa_destroy(&tctx->xa);
kfree(tctx);
tsk->io_uring = NULL;
}
for (j = 0; j < imu->nr_bvecs; j++)
unpin_user_page(imu->bvec[j].bv_page);
- io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED);
+ if (imu->acct_pages)
+ io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
kvfree(imu->bvec);
imu->nr_bvecs = 0;
}
return 0;
}
+/*
+ * Not super efficient, but this is just a registration time. And we do cache
+ * the last compound head, so generally we'll only do a full search if we don't
+ * match that one.
+ *
+ * We check if the given compound head page has already been accounted, to
+ * avoid double accounting it. This allows us to account the full size of the
+ * page, not just the constituent pages of a huge page.
+ */
+static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
+ int nr_pages, struct page *hpage)
+{
+ int i, j;
+
+ /* check current page array */
+ for (i = 0; i < nr_pages; i++) {
+ if (!PageCompound(pages[i]))
+ continue;
+ if (compound_head(pages[i]) == hpage)
+ return true;
+ }
+
+ /* check previously registered pages */
+ for (i = 0; i < ctx->nr_user_bufs; i++) {
+ struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+
+ for (j = 0; j < imu->nr_bvecs; j++) {
+ if (!PageCompound(imu->bvec[j].bv_page))
+ continue;
+ if (compound_head(imu->bvec[j].bv_page) == hpage)
+ return true;
+ }
+ }
+
+ return false;
+}
+
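+/*
+ * Account the pinned pages for this buffer, counting each compound (huge)
+ * page only once via its head page so its full size is charged a single
+ * time.
+ */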
+static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
+ int nr_pages, struct io_mapped_ubuf *imu,
+ struct page **last_hpage)
+{
+ int i, ret;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (!PageCompound(pages[i])) {
+ imu->acct_pages++;
+ } else {
+ struct page *hpage;
+
+ hpage = compound_head(pages[i]);
+ if (hpage == *last_hpage)
+ continue;
+ *last_hpage = hpage;
+ if (headpage_already_acct(ctx, pages, i, hpage))
+ continue;
+ imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
+ }
+ }
+
+ if (!imu->acct_pages)
+ return 0;
+
+ ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
+ if (ret)
+ imu->acct_pages = 0;
+ return ret;
+}
+
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
struct vm_area_struct **vmas = NULL;
struct page **pages = NULL;
+ struct page *last_hpage = NULL;
int i, j, got_pages = 0;
int ret = -EINVAL;
start = ubuf >> PAGE_SHIFT;
nr_pages = end - start;
- ret = io_account_mem(ctx, nr_pages, ACCT_PINNED);
- if (ret)
- goto err;
-
ret = 0;
if (!pages || nr_pages > got_pages) {
kvfree(vmas);
GFP_KERNEL);
if (!pages || !vmas) {
ret = -ENOMEM;
- io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
goto err;
}
got_pages = nr_pages;
imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
GFP_KERNEL);
ret = -ENOMEM;
- if (!imu->bvec) {
- io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
+ if (!imu->bvec)
goto err;
- }
ret = 0;
mmap_read_lock(current->mm);
*/
if (pret > 0)
unpin_user_pages(pages, pret);
- io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
+ kvfree(imu->bvec);
+ goto err;
+ }
+
+ ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
+ if (ret) {
+ unpin_user_pages(pages, pret);
kvfree(imu->bvec);
goto err;
}
ctx->mm_account = NULL;
}
+#ifdef CONFIG_BLK_CGROUP
+ if (ctx->sqo_blkcg_css)
+ css_put(ctx->sqo_blkcg_css);
+#endif
+
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
*/
static int io_uring_add_task_file(struct file *file)
{
- if (unlikely(!current->io_uring)) {
+ struct io_uring_task *tctx = current->io_uring;
+
+ if (unlikely(!tctx)) {
int ret;
ret = io_uring_alloc_task_context(current);
if (unlikely(ret))
return ret;
+ tctx = current->io_uring;
}
- if (current->io_uring->last != file) {
- XA_STATE(xas, &current->io_uring->xa, (unsigned long) file);
- void *old;
+ if (tctx->last != file) {
+ void *old = xa_load(&tctx->xa, (unsigned long)file);
- rcu_read_lock();
- old = xas_load(&xas);
- if (old != file) {
+ if (!old) {
get_file(file);
- xas_lock(&xas);
- xas_store(&xas, file);
- xas_unlock(&xas);
+ xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL);
}
- rcu_read_unlock();
- current->io_uring->last = file;
+ tctx->last = file;
}
return 0;
static void io_uring_del_task_file(struct file *file)
{
struct io_uring_task *tctx = current->io_uring;
- XA_STATE(xas, &tctx->xa, (unsigned long) file);
if (tctx->last == file)
tctx->last = NULL;
-
- xas_lock(&xas);
- file = xas_store(&xas, NULL);
- xas_unlock(&xas);
-
+ file = xa_erase(&tctx->xa, (unsigned long)file);
if (file)
fput(file);
}
static void __io_uring_attempt_task_drop(struct file *file)
{
- XA_STATE(xas, &current->io_uring->xa, (unsigned long) file);
- struct file *old;
-
- rcu_read_lock();
- old = xas_load(&xas);
- rcu_read_unlock();
+ struct file *old = xa_load(&current->io_uring->xa, (unsigned long)file);
if (old == file)
io_uring_del_task_file(file);
void __io_uring_files_cancel(struct files_struct *files)
{
struct io_uring_task *tctx = current->io_uring;
- XA_STATE(xas, &tctx->xa, 0);
+ struct file *file;
+ unsigned long index;
/* make sure overflow events are dropped */
tctx->in_idle = true;
- do {
- struct io_ring_ctx *ctx;
- struct file *file;
-
- xas_lock(&xas);
- file = xas_next_entry(&xas, ULONG_MAX);
- xas_unlock(&xas);
-
- if (!file)
- break;
-
- ctx = file->private_data;
+ xa_for_each(&tctx->xa, index, file) {
+ struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_task_requests(ctx, files);
if (files)
io_uring_del_task_file(file);
- } while (1);
+ }
}
static inline bool io_uring_task_idle(struct io_uring_task *tctx)
static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
+ struct io_sq_data *sq = NULL;
bool has_lock;
int i;
*/
has_lock = mutex_trylock(&ctx->uring_lock);
+ if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
+ sq = ctx->sq_data;
+
+ seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct fixed_file_table *table;
mmgrab(current->mm);
ctx->mm_account = current->mm;
+#ifdef CONFIG_BLK_CGROUP
+ /*
+ * The sq thread will belong to the original cgroup it was inited in.
+ * If the cgroup goes offline (e.g. disabling the io controller), then
+ * issued bios will be associated with the closest cgroup later in the
+ * block layer.
+ */
+ rcu_read_lock();
+ ctx->sqo_blkcg_css = blkcg_css();
+ ret = css_tryget_online(ctx->sqo_blkcg_css);
+ rcu_read_unlock();
+ if (!ret) {
+ /* don't init against a dying cgroup, have the user try again */
+ ctx->sqo_blkcg_css = NULL;
+ ret = -ENODEV;
+ goto err;
+ }
+#endif
+
/*
* Account memory _before_ installing the file descriptor. Once
* the descriptor is installed, it can get closed at any time. Also
* after we've killed the percpu ref.
*/
mutex_unlock(&ctx->uring_lock);
- ret = wait_for_completion_interruptible(&ctx->ref_comp);
+ do {
+ ret = wait_for_completion_interruptible(&ctx->ref_comp);
+ if (!ret)
+ break;
+ ret = io_run_task_work_sig();
+ if (ret < 0)
+ break;
+ } while (1);
+
mutex_lock(&ctx->uring_lock);
+
if (ret) {
percpu_ref_resurrect(&ctx->refs);
- ret = -EINTR;
goto out_quiesce;
}
}