io_uring: alloc ->io in io_req_defer_prep()
author	Pavel Begunkov <asml.silence@gmail.com>
Wed, 15 Jul 2020 09:46:51 +0000 (12:46 +0300)
committer	Jens Axboe <axboe@kernel.dk>
Fri, 24 Jul 2020 19:00:44 +0000 (13:00 -0600)
Every call to io_req_defer_prep() is preceded by allocating ->io, so just do
that inside the function. And while we're at it, mark the error paths with
unlikely() and replace "if (ret < 0)" with "if (ret)".
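
For reference, a sketch of the resulting function shape (the per-opcode prep
dispatch and the rest of the body are elided, so the surrounding code may
differ slightly):

  static int io_req_defer_prep(struct io_kiocb *req,
                               const struct io_uring_sqe *sqe)
  {
          int ret = 0;

          if (!sqe)
                  return 0;

          /* ->io is now allocated here instead of in every caller */
          if (io_alloc_async_ctx(req))
                  return -EAGAIN;

          if (io_op_defs[req->opcode].file_table) {
                  io_req_init_async(req);
                  ret = io_grab_files(req);
                  if (unlikely(ret))
                          return ret;
          }

          /* ... per-opcode prep ... */
          return ret;
  }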

There is only one change in observable behaviour: instead of killing the head
request right away on error, it is now postponed until the link is assembled,
which seems preferable.
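
This appears to be the branch of io_submit_sqe() that starts a new link (the
last hunk below): a failed ->io allocation used to return -EAGAIN before *link
was set, so the would-be head got failed on the spot, while now the same
failure only marks the head, which is then failed together with the assembled
link. Roughly:

          req->flags |= REQ_F_LINK_HEAD;
          INIT_LIST_HEAD(&req->link_list);

          ret = io_req_defer_prep(req, sqe);      /* allocates ->io itself now */
          if (unlikely(ret))
                  req->flags |= REQ_F_FAIL_LINK;  /* failed later, with the link */
          *link = req;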

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8d6f1c4..6a1cd2a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5279,6 +5279,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
        if (!sqe)
                return 0;
 
+       if (io_alloc_async_ctx(req))
+               return -EAGAIN;
+
        if (io_op_defs[req->opcode].file_table) {
                io_req_init_async(req);
                ret = io_grab_files(req);
@@ -5418,10 +5421,8 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return 0;
 
        if (!req->io) {
-               if (io_alloc_async_ctx(req))
-                       return -EAGAIN;
                ret = io_req_defer_prep(req, sqe);
-               if (ret < 0)
+               if (ret)
                        return ret;
        }
        io_prep_async_link(req);
@@ -6024,11 +6025,8 @@ fail_req:
                }
        } else if (req->flags & REQ_F_FORCE_ASYNC) {
                if (!req->io) {
-                       ret = -EAGAIN;
-                       if (io_alloc_async_ctx(req))
-                               goto fail_req;
                        ret = io_req_defer_prep(req, sqe);
-                       if (unlikely(ret < 0))
+                       if (unlikely(ret))
                                goto fail_req;
                }
 
@@ -6081,11 +6079,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                        head->flags |= REQ_F_IO_DRAIN;
                        ctx->drain_next = 1;
                }
-               if (io_alloc_async_ctx(req))
-                       return -EAGAIN;
-
                ret = io_req_defer_prep(req, sqe);
-               if (ret) {
+               if (unlikely(ret)) {
                        /* fail even hard links since we don't submit */
                        head->flags |= REQ_F_FAIL_LINK;
                        return ret;
@@ -6108,11 +6103,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                        req->flags |= REQ_F_LINK_HEAD;
                        INIT_LIST_HEAD(&req->link_list);
 
-                       if (io_alloc_async_ctx(req))
-                               return -EAGAIN;
-
                        ret = io_req_defer_prep(req, sqe);
-                       if (ret)
+                       if (unlikely(ret))
                                req->flags |= REQ_F_FAIL_LINK;
                        *link = req;
                } else {