io_uring: make fail flag not link specific
Author:     Pavel Begunkov <asml.silence@gmail.com>
AuthorDate: Sun, 16 May 2021 21:58:05 +0000 (22:58 +0100)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Mon, 14 Jun 2021 14:23:04 +0000 (08:23 -0600)
The main change is that req_set_fail_links() is renamed to req_set_fail()
and now sets the REQ_F_FAIL flag (formerly REQ_F_FAIL_LINK)
unconditionally, regardless of whether the request is part of a link. The
flag only matters in io_disarm_next(), which already handles that case
well, and every call to io_disarm_next() sits behind a fast path checking
REQ_F_LINK/REQ_F_HARDLINK.
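
For reference, here is the change in miniature, excerpted from the diff
below: the helper sets the flag unconditionally, and the completion path
only consults it from behind the link check.

   static inline void req_set_fail(struct io_kiocb *req)
   {
           req->flags |= REQ_F_FAIL;
   }

   /* e.g. in io_req_complete_post(): REQ_F_FAIL is only looked at
    * when the request is actually part of a link */
   if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
           if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
                   io_disarm_next(req);
           ...
   }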

It looks cleaner and sheds binary size (before/after):
   text    data     bss     dec     hex filename
  84235   12390       8   96633   17979 ./fs/io_uring.o
  84151   12414       8   96573   1793d ./fs/io_uring.o

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e2224154dd6e53b665ac835d29436b177872fa10.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8b8d252..8a5fda7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -705,7 +705,7 @@ enum {
        REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
 
        /* first byte is taken by user flags, shift it to not overlap */
-       REQ_F_FAIL_LINK_BIT     = 8,
+       REQ_F_FAIL_BIT          = 8,
        REQ_F_INFLIGHT_BIT,
        REQ_F_CUR_POS_BIT,
        REQ_F_NOWAIT_BIT,
@@ -741,7 +741,7 @@ enum {
        REQ_F_BUFFER_SELECT     = BIT(REQ_F_BUFFER_SELECT_BIT),
 
        /* fail rest of links */
-       REQ_F_FAIL_LINK         = BIT(REQ_F_FAIL_LINK_BIT),
+       REQ_F_FAIL              = BIT(REQ_F_FAIL_BIT),
        /* on inflight list, should be cancelled and waited on exit reliably */
        REQ_F_INFLIGHT          = BIT(REQ_F_INFLIGHT_BIT),
        /* read/write uses file position */
@@ -1122,10 +1122,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
        return false;
 }
 
-static inline void req_set_fail_links(struct io_kiocb *req)
+static inline void req_set_fail(struct io_kiocb *req)
 {
-       if (req->flags & REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       req->flags |= REQ_F_FAIL;
 }
 
 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
@@ -1594,7 +1593,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
                struct io_comp_state *cs = &ctx->submit_state.comp;
 
                if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
-                       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
+                       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
                                io_disarm_next(req);
                        if (req->link) {
                                io_req_task_queue(req->link);
@@ -1650,7 +1649,7 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
 
 static void io_req_complete_failed(struct io_kiocb *req, long res)
 {
-       req_set_fail_links(req);
+       req_set_fail(req);
        io_put_req(req);
        io_req_complete_post(req, res, 0);
 }
@@ -1829,7 +1828,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 
        if (likely(req->flags & REQ_F_LINK_TIMEOUT))
                posted = io_kill_linked_timeout(req);
-       if (unlikely((req->flags & REQ_F_FAIL_LINK) &&
+       if (unlikely((req->flags & REQ_F_FAIL) &&
                     !(req->flags & REQ_F_HARDLINK))) {
                posted |= (req->link != NULL);
                io_fail_links(req);
@@ -1847,7 +1846,7 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
         * dependencies to the next request. In case of failure, fail the rest
         * of the chain.
         */
-       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
+       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
                struct io_ring_ctx *ctx = req->ctx;
                unsigned long flags;
                bool posted;
@@ -2486,7 +2485,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
                        req->flags |= REQ_F_REISSUE;
                        return;
                }
-               req_set_fail_links(req);
+               req_set_fail(req);
        }
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_rw_kbuf(req);
@@ -2509,7 +2508,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (unlikely(res != req->result)) {
                if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
                    io_resubmit_prep(req))) {
-                       req_set_fail_links(req);
+                       req_set_fail(req);
                        req->flags |= REQ_F_DONT_REISSUE;
                }
        }
@@ -2765,7 +2764,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                } else {
                        int cflags = 0;
 
-                       req_set_fail_links(req);
+                       req_set_fail(req);
                        if (req->flags & REQ_F_BUFFER_SELECTED)
                                cflags = io_put_rw_kbuf(req);
                        __io_req_complete(req, issue_flags, ret, cflags);
@@ -3487,7 +3486,7 @@ static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
 
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3531,7 +3530,7 @@ static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
 
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3568,7 +3567,7 @@ static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = __sys_shutdown_sock(sock, req->shutdown.how);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 #else
@@ -3626,7 +3625,7 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
        req->flags &= ~REQ_F_NEED_CLEANUP;
 
        if (ret != sp->len)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3663,7 +3662,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
        req->flags &= ~REQ_F_NEED_CLEANUP;
 
        if (ret != sp->len)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3716,7 +3715,7 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
                                end > 0 ? end : LLONG_MAX,
                                req->sync.flags & IORING_FSYNC_DATASYNC);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3745,7 +3744,7 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
        ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
                                req->sync.len);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3864,7 +3863,7 @@ err:
        putname(req->open.filename);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -3936,7 +3935,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
        if (head)
                ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
 
        /* complete before unlock, IOPOLL may need the lock */
        __io_req_complete(req, issue_flags, ret, 0);
@@ -4027,7 +4026,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
                        __io_remove_buffers(ctx, head, p->bgid, -1U);
        }
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        /* complete before unlock, IOPOLL may need the lock */
        __io_req_complete(req, issue_flags, ret, 0);
        io_ring_submit_unlock(ctx, !force_nonblock);
@@ -4073,7 +4072,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
                return -EAGAIN;
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 #else
@@ -4109,7 +4108,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 #else
@@ -4148,7 +4147,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -4183,7 +4182,7 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
                       ctx->buffer);
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -4241,7 +4240,7 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
        ret = filp_close(file, current->files);
 err:
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        if (file)
                fput(file);
        __io_req_complete(req, issue_flags, ret, 0);
@@ -4274,7 +4273,7 @@ static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
        ret = sync_file_range(req->file, req->sync.off, req->sync.len,
                                req->sync.flags);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -4378,7 +4377,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < min_ret)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -4420,7 +4419,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
                ret = -EINTR;
 
        if (ret < min_ret)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -4615,7 +4614,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
 }
@@ -4670,7 +4669,7 @@ out_free:
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_recv_kbuf(req);
        if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
 }
@@ -4709,7 +4708,7 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
        if (ret < 0) {
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
-               req_set_fail_links(req);
+               req_set_fail(req);
        }
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
@@ -4773,7 +4772,7 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
                ret = -EINTR;
 out:
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -5244,7 +5243,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
        if (do_complete) {
                io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
                io_commit_cqring(req->ctx);
-               req_set_fail_links(req);
+               req_set_fail(req);
                io_put_req_deferred(req, 1);
        }
 
@@ -5454,7 +5453,7 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 err:
        if (ret < 0) {
                spin_unlock_irq(&ctx->completion_lock);
-               req_set_fail_links(req);
+               req_set_fail(req);
                io_req_complete(req, ret);
                return 0;
        }
@@ -5474,7 +5473,7 @@ err:
        if (!completing) {
                ret = io_poll_add(preq, issue_flags);
                if (ret < 0) {
-                       req_set_fail_links(preq);
+                       req_set_fail(preq);
                        io_req_complete(preq, ret);
                }
        }
@@ -5499,7 +5498,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        io_cqring_ev_posted(ctx);
-       req_set_fail_links(req);
+       req_set_fail(req);
        io_put_req(req);
        return HRTIMER_NORESTART;
 }
@@ -5535,7 +5534,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       req_set_fail_links(req);
+       req_set_fail(req);
        io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
        io_put_req_deferred(req, 1);
        return 0;
@@ -5614,7 +5613,7 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
        spin_unlock_irq(&ctx->completion_lock);
        io_cqring_ev_posted(ctx);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_put_req(req);
        return 0;
 }
@@ -5767,7 +5766,7 @@ done:
        io_cqring_ev_posted(ctx);
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
 }
 
 static int io_async_cancel_prep(struct io_kiocb *req,
@@ -5824,7 +5823,7 @@ done:
        io_cqring_ev_posted(ctx);
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_put_req(req);
        return 0;
 }
@@ -5866,7 +5865,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        mutex_unlock(&ctx->uring_lock);
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -6569,7 +6568,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 fail_req:
                if (link->head) {
                        /* fail even hard links since we don't submit */
-                       link->head->flags |= REQ_F_FAIL_LINK;
+                       req_set_fail(link->head);
                        io_req_complete_failed(link->head, -ECANCELED);
                        link->head = NULL;
                }