block: Call .limit_depth() after .hctx has been set
author     Bart Van Assche <bvanassche@acm.org>
           Thu, 9 May 2024 17:01:48 +0000 (10:01 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 2 Jul 2024 14:47:45 +0000 (08:47 -0600)
Call .limit_depth() after data->hctx has been set such that data->hctx can
be used in .limit_depth() implementations.
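
For context, a minimal sketch of the kind of .limit_depth() implementation this change enables, one that derives its depth limit from the hardware queue's scheduler tags via data->hctx. This is not part of the patch; my_limit_depth, my_elv_data and my_async_depth are made-up names used only for illustration.

/*
 * Hypothetical elevator_mq_ops.limit_depth hook. With this patch,
 * data->hctx is already valid when the hook runs, so the shallow
 * depth can take per-hctx scheduler tag state into account.
 */
struct my_elv_data {
	unsigned int my_async_depth;	/* illustrative tunable */
};

static void my_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct my_elv_data *ed = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/* Cap async requests to at most the hctx's scheduler tag depth. */
	data->shallow_depth = min_t(unsigned int, ed->my_async_depth,
				    data->hctx->sched_tags->bitmap_tags.sb.depth);
}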

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Zhiguo Niu <zhiguo.niu@unisoc.com>
Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240509170149.7639-2-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index fec2dea..e3c3c0c 100644
@@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;
 
+retry:
+       data->ctx = blk_mq_get_ctx(q);
+       data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+
        if (q->elevator) {
                /*
                 * All requests use scheduler tags when an I/O scheduler is
@@ -469,13 +473,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
                        if (ops->limit_depth)
                                ops->limit_depth(data->cmd_flags, data);
                }
-       }
-
-retry:
-       data->ctx = blk_mq_get_ctx(q);
-       data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-       if (!(data->rq_flags & RQF_SCHED_TAGS))
+       } else {
                blk_mq_tag_busy(data->hctx);
+       }
 
        if (data->flags & BLK_MQ_REQ_RESERVED)
                data->rq_flags |= RQF_RESV;