io_uring: clean up io_ring_ctx_alloc
author Pavel Begunkov <asml.silence@gmail.com>
Thu, 16 Jun 2022 09:22:06 +0000 (10:22 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 25 Jul 2022 00:39:13 +0000 (18:39 -0600)
Add a variable for the number of hash buckets in io_ring_ctx_alloc(),
which makes the code more readable.

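For illustration, here is a minimal standalone sketch (not part of the patch)
of how the bucket count scales with cq_entries under the same sizing rule;
ilog2_u32() below is a simplified userspace stand-in for the kernel's ilog2():

	#include <stdio.h>

	/* simplified stand-in for the kernel's ilog2() */
	static int ilog2_u32(unsigned int v)
	{
		int bits = -1;

		while (v) {
			v >>= 1;
			bits++;
		}
		return bits;
	}

	int main(void)
	{
		unsigned int cq_entries = 4096;	/* example value */
		int hash_bits = ilog2_u32(cq_entries) - 5;

		/* open-coded clamp(hash_bits, 1, 8) */
		if (hash_bits < 1)
			hash_bits = 1;
		if (hash_bits > 8)
			hash_bits = 8;

		/* prints: cq_entries=4096 -> hash_bits=7, buckets=128 */
		printf("cq_entries=%u -> hash_bits=%d, buckets=%u\n",
		       cq_entries, hash_bits, 1U << hash_bits);
		return 0;
	}
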
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/993926ed0d614ba9a76b2a85bebae2babcb13983.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index aafdf13..85a4795 100644
@@ -244,6 +244,8 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
        struct io_ring_ctx *ctx;
+       unsigned hash_buckets;
+       size_t hash_size;
        int hash_bits;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -259,15 +261,15 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
         */
        hash_bits = ilog2(p->cq_entries) - 5;
        hash_bits = clamp(hash_bits, 1, 8);
+       hash_buckets = 1U << hash_bits;
+       hash_size = hash_buckets * sizeof(struct io_hash_bucket);
 
        ctx->cancel_hash_bits = hash_bits;
-       ctx->cancel_hash =
-               kmalloc((1U << hash_bits) * sizeof(struct io_hash_bucket),
-                       GFP_KERNEL);
+       ctx->cancel_hash = kmalloc(hash_size, GFP_KERNEL);
        if (!ctx->cancel_hash)
                goto err;
 
-       init_hash_table(ctx->cancel_hash, 1U << hash_bits);
+       init_hash_table(ctx->cancel_hash, hash_buckets);
 
        ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
        if (!ctx->dummy_ubuf)
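
For reference, with the patch applied the hash setup in io_ring_ctx_alloc()
reads as follows (reconstructed from the hunks above; surrounding code elided):

	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	hash_buckets = 1U << hash_bits;
	hash_size = hash_buckets * sizeof(struct io_hash_bucket);

	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc(hash_size, GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;

	init_hash_table(ctx->cancel_hash, hash_buckets);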