.pollout = 1,
.needs_async_data = 1,
.async_size = sizeof(struct io_async_msghdr),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
+ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+ IO_WQ_WORK_FS,
},
[IORING_OP_RECVMSG] = {
.needs_file = 1,
.buffer_select = 1,
.needs_async_data = 1,
.async_size = sizeof(struct io_async_msghdr),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
+ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+ IO_WQ_WORK_FS,
},
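+ /*
+  * Both SENDMSG and RECVMSG above now also carry IO_WQ_WORK_FS, so an
+  * io-wq worker executing them inherits the submitter's ->fs alongside
+  * its mm and blkcg.
+  */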
[IORING_OP_TIMEOUT] = {
.needs_async_data = 1,
if (wq_list_empty(&tctx->task_list))
return false;
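+ /*
+  * Requests can queue task_work from their completion path, which may
+  * run in IRQ context, so task_lock has to be taken IRQ-safe here.
+  */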
- spin_lock(&tctx->task_lock);
+ spin_lock_irq(&tctx->task_lock);
list = tctx->task_list;
INIT_WQ_LIST(&tctx->task_list);
- spin_unlock(&tctx->task_lock);
+ spin_unlock_irq(&tctx->task_lock);
node = list.first;
while (node) {
{
struct io_uring_task *tctx = tsk->io_uring;
struct io_wq_work_node *node, *prev;
+ unsigned long flags;
int ret;
WARN_ON_ONCE(!tctx);
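+ /*
+  * This path can itself run from a completion in IRQ context, so use
+  * the irqsave/irqrestore variants rather than plain _irq.
+  */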
- spin_lock(&tctx->task_lock);
+ spin_lock_irqsave(&tctx->task_lock, flags);
wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
- spin_unlock(&tctx->task_lock);
+ spin_unlock_irqrestore(&tctx->task_lock, flags);
/* task_work already pending, we're done */
if (test_bit(0, &tctx->task_state) ||
* in the list, it got run and we're fine.
*/
ret = 0;
- spin_lock(&tctx->task_lock);
+ spin_lock_irqsave(&tctx->task_lock, flags);
wq_list_for_each(node, prev, &tctx->task_list) {
if (&req->io_task_work.node == node) {
wq_list_del(&tctx->task_list, node, prev);
break;
}
}
- spin_unlock(&tctx->task_lock);
+ spin_unlock_irqrestore(&tctx->task_lock, flags);
clear_bit(0, &tctx->task_state);
return ret;
}
else
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
+
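+ /*
+  * An SQPOLL thread may have taken on the submitting task's mm/files
+  * to run this work; release them again now that the submit is done.
+  */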
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ io_sq_thread_drop_mm_files();
}
static void io_req_task_submit(struct callback_head *cb)
}
}
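+ /*
+  * Free the requests cached in ctx->submit_state: the bulk-allocated
+  * reqs[] array, the plain free_list, and the locked_free_list (the
+  * latter under ->completion_lock), all while holding ->uring_lock.
+  */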
-static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
struct io_submit_state *submit_state = &ctx->submit_state;
+ mutex_lock(&ctx->uring_lock);
+
+ if (submit_state->free_reqs)
+ kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
+ submit_state->reqs);
+
+ io_req_cache_free(&submit_state->comp.free_list, NULL);
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+{
/*
* Some may use context even when all refs and requests have been put,
* and they are free to do so while still holding uring_lock, see
ctx->mm_account = NULL;
}
- if (submit_state->free_reqs)
- kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
- submit_state->reqs);
-
#ifdef CONFIG_BLK_CGROUP
if (ctx->sqo_blkcg_css)
css_put(ctx->sqo_blkcg_css);
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
put_cred(ctx->creds);
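+ /* drop any requests still sitting in the ctx caches before freeing it */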
+ io_req_caches_free(ctx, NULL);
kfree(ctx->cancel_hash);
- io_req_cache_free(&ctx->submit_state.comp.free_list, NULL);
- io_req_cache_free(&ctx->submit_state.comp.locked_free_list, NULL);
kfree(ctx);
}
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
atomic_dec(&task->io_uring->in_idle);
- /*
- * If the files that are going away are the ones in the thread
- * identity, clear them out.
- */
- if (task->io_uring->identity->files == files)
- task->io_uring->identity->files = NULL;
io_sq_thread_unpark(ctx->sq_data);
}
}
struct io_uring_task *tctx = current->io_uring;
struct io_ring_ctx *ctx = file->private_data;
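+ /*
+  * The task is exiting or has a fatal signal pending: cancel its
+  * requests and release the ctx's cached requests so they aren't
+  * left behind.
+  */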
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
io_uring_cancel_task_requests(ctx, NULL);
+ io_req_caches_free(ctx, current);
+ }
if (!tctx)
return 0;