io_uring: acquire 'mm' for task_work for SQPOLL
author Jens Axboe <axboe@kernel.dk>
Wed, 17 Jun 2020 00:42:49 +0000 (18:42 -0600)
committer Jens Axboe <axboe@kernel.dk>
Wed, 17 Jun 2020 18:49:16 +0000 (12:49 -0600)
If we're unlucky with timing, we could be running task_work after
having dropped the memory context in the sq thread. Since dropping
the context requires a runnable task state, we cannot reliably drop
it as part of our check-for-work loop in io_sq_thread(). Instead,
abstract out the mm acquire for the sq thread into a helper, and call
it from the async task work handler.

Cc: stable@vger.kernel.org # v5.7
Signed-off-by: Jens Axboe <axboe@kernel.dk>
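
For reference, the borrow/release pattern the new helpers centralize is
the standard way a kernel thread temporarily adopts a user address
space. A minimal sketch of that pattern (hypothetical helper names, not
part of this patch; the real helpers appear in the diff below):

	/* Pin the mm first: the owning task may already be exiting,
	 * in which case mmget_not_zero() fails and we must bail out.
	 */
	static int borrow_user_mm(struct mm_struct *mm)
	{
		if (unlikely(!mmget_not_zero(mm)))
			return -EFAULT;
		kthread_use_mm(mm);	/* install mm as current->mm */
		return 0;
	}

	/* Release in reverse order once the work is done. */
	static void unborrow_user_mm(struct mm_struct *mm)
	{
		kthread_unuse_mm(mm);
		mmput(mm);
	}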
fs/io_uring.c

index 9d2ae9a..98c83fb 100644
@@ -4256,6 +4256,28 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
        __io_queue_proc(&pt->req->apoll->poll, pt, head);
 }
 
+static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+       struct mm_struct *mm = current->mm;
+
+       if (mm) {
+               kthread_unuse_mm(mm);
+               mmput(mm);
+       }
+}
+
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+                                  struct io_kiocb *req)
+{
+       if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+               if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+                       return -EFAULT;
+               kthread_use_mm(ctx->sqo_mm);
+       }
+
+       return 0;
+}
+
 static void io_async_task_func(struct callback_head *cb)
 {
        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -4290,11 +4312,16 @@ static void io_async_task_func(struct callback_head *cb)
 
        if (!canceled) {
                __set_current_state(TASK_RUNNING);
+               if (io_sq_thread_acquire_mm(ctx, req)) {
+                       io_cqring_add_event(req, -EFAULT);
+                       goto end_req;
+               }
                mutex_lock(&ctx->uring_lock);
                __io_queue_sqe(req, NULL);
                mutex_unlock(&ctx->uring_lock);
        } else {
                io_cqring_ev_posted(ctx);
+end_req:
                req_set_fail_links(req);
                io_double_put_req(req);
        }
@@ -5841,11 +5868,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (unlikely(req->opcode >= IORING_OP_LAST))
                return -EINVAL;
 
-       if (io_op_defs[req->opcode].needs_mm && !current->mm) {
-               if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
-                       return -EFAULT;
-               kthread_use_mm(ctx->sqo_mm);
-       }
+       if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
+               return -EFAULT;
 
        sqe_flags = READ_ONCE(sqe->flags);
        /* enforce forwards compatibility on users */
@@ -5954,16 +5978,6 @@ fail_req:
        return submitted;
 }
 
-static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
-{
-       struct mm_struct *mm = current->mm;
-
-       if (mm) {
-               kthread_unuse_mm(mm);
-               mmput(mm);
-       }
-}
-
 static int io_sq_thread(void *data)
 {
        struct io_ring_ctx *ctx = data;