io_uring: factor out grab_env() from defer_prep()
author: Pavel Begunkov <asml.silence@gmail.com>
Mon, 29 Jun 2020 16:18:42 +0000 (19:18 +0300)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 30 Jun 2020 14:39:59 +0000 (08:39 -0600)
Remove io_req_work_grab_env() call from io_req_defer_prep(), just call
it when necessary.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 3b2f6fd..caf9083 100644 (file)
@@ -5240,7 +5240,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock,
 }
 
 static int io_req_defer_prep(struct io_kiocb *req,
-                            const struct io_uring_sqe *sqe, bool for_async)
+                            const struct io_uring_sqe *sqe)
 {
        ssize_t ret = 0;
 
@@ -5254,9 +5254,6 @@ static int io_req_defer_prep(struct io_kiocb *req,
                        return ret;
        }
 
-       if (for_async || (req->flags & REQ_F_WORK_INITIALIZED))
-               io_req_work_grab_env(req);
-
        switch (req->opcode) {
        case IORING_OP_NOP:
                break;
@@ -5369,9 +5366,10 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (!req->io) {
                if (io_alloc_async_ctx(req))
                        return -EAGAIN;
-               ret = io_req_defer_prep(req, sqe, true);
+               ret = io_req_defer_prep(req, sqe);
                if (ret < 0)
                        return ret;
+               io_req_work_grab_env(req);
        }
 
        spin_lock_irq(&ctx->completion_lock);
@@ -5983,9 +5981,10 @@ fail_req:
                        ret = -EAGAIN;
                        if (io_alloc_async_ctx(req))
                                goto fail_req;
-                       ret = io_req_defer_prep(req, sqe, true);
+                       ret = io_req_defer_prep(req, sqe);
                        if (unlikely(ret < 0))
                                goto fail_req;
+                       io_req_work_grab_env(req);
                }
 
                /*
@@ -6039,7 +6038,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                if (io_alloc_async_ctx(req))
                        return -EAGAIN;
 
-               ret = io_req_defer_prep(req, sqe, false);
+               ret = io_req_defer_prep(req, sqe);
                if (ret) {
                        /* fail even hard links since we don't submit */
                        head->flags |= REQ_F_FAIL_LINK;
@@ -6066,7 +6065,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                        if (io_alloc_async_ctx(req))
                                return -EAGAIN;
 
-                       ret = io_req_defer_prep(req, sqe, false);
+                       ret = io_req_defer_prep(req, sqe);
                        if (ret)
                                req->flags |= REQ_F_FAIL_LINK;
                        *link = req;