io_uring: defer flushing cached reqs
author	Pavel Begunkov <asml.silence@gmail.com>
Wed, 10 Feb 2021 00:03:23 +0000 (00:03 +0000)
committer	Jens Axboe <axboe@kernel.dk>
Wed, 10 Feb 2021 14:28:43 +0000 (07:28 -0700)
While there are requests in the allocation cache, use them; only once
those are exhausted go for the stashed memory in comp.free_list. As list
manipulation is generally heavy and not cache friendly, flush them all,
or as many as will fit, in one go.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: return success/failure from io_flush_cached_reqs()]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index bff5bc4..4a28032 100644
@@ -1952,25 +1952,35 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
        __io_req_complete(req, 0, res, 0);
 }
 
-static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+static bool io_flush_cached_reqs(struct io_submit_state *state)
 {
-       struct io_submit_state *state = &ctx->submit_state;
-
-       BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
-
-       if (!list_empty(&state->comp.free_list)) {
-               struct io_kiocb *req;
+       struct io_kiocb *req = NULL;
 
+       while (!list_empty(&state->comp.free_list)) {
                req = list_first_entry(&state->comp.free_list, struct io_kiocb,
                                        compl.list);
                list_del(&req->compl.list);
-               return req;
+               state->reqs[state->free_reqs++] = req;
+               if (state->free_reqs == ARRAY_SIZE(state->reqs))
+                       break;
        }
 
+       return req != NULL;
+}
+
+static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+{
+       struct io_submit_state *state = &ctx->submit_state;
+
+       BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+
        if (!state->free_reqs) {
                gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
                int ret;
 
+               if (io_flush_cached_reqs(state))
+                       goto got_req;
+
                ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
                                            state->reqs);
 
@@ -1986,7 +1996,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
                }
                state->free_reqs = ret;
        }
-
+got_req:
        state->free_reqs--;
        return state->reqs[state->free_reqs];
 }
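
For illustration only, a minimal userspace sketch of the same batch-flush
idea. The layout is a simplified assumption: a plain singly-linked free list
and calloc() stand in for struct io_kiocb, list_head, comp.free_list and
kmem_cache_alloc_bulk(); names like recycle_req() are invented for the sketch
and are not kernel APIs.

	/*
	 * Sketch of the deferred-flush allocation cache: the hot path pops
	 * from an array; only when the array runs dry is the free list
	 * drained into it in one batch, amortising list manipulation.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define REQ_CACHE_SIZE 32

	struct req {
		struct req *next;	/* stands in for compl.list linkage */
		int id;
	};

	struct submit_state {
		struct req *free_list;			/* completed, recyclable reqs */
		struct req *reqs[REQ_CACHE_SIZE];	/* array cache, used as a LIFO */
		unsigned int free_reqs;
	};

	/* Move as many free-list entries as fit into the array cache. */
	static bool flush_cached_reqs(struct submit_state *state)
	{
		struct req *req = NULL;

		while (state->free_list) {
			req = state->free_list;
			state->free_list = req->next;
			state->reqs[state->free_reqs++] = req;
			if (state->free_reqs == REQ_CACHE_SIZE)
				break;
		}
		return req != NULL;
	}

	static struct req *alloc_req(struct submit_state *state)
	{
		if (!state->free_reqs) {
			/* Prefer recycled requests over a fresh allocation. */
			if (!flush_cached_reqs(state))
				return calloc(1, sizeof(struct req));
		}
		return state->reqs[--state->free_reqs];
	}

	/* Completion path: stash the request on the free list for later reuse. */
	static void recycle_req(struct submit_state *state, struct req *req)
	{
		req->next = state->free_list;
		state->free_list = req;
	}

	int main(void)
	{
		struct submit_state state = { 0 };
		struct req *a = alloc_req(&state);
		struct req *b = alloc_req(&state);

		recycle_req(&state, a);
		recycle_req(&state, b);
		/* This allocation flushes both recycled reqs into the array cache. */
		struct req *c = alloc_req(&state);

		printf("reused=%d cached=%u\n", c == a, state.free_reqs);
		free(a);
		free(b);
		return 0;
	}

The point of the pattern is that pointer chasing and list unlinking happen
only on the rare refill path; the common allocation is a single array index
decrement.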