Revert "blk-mq, elevator: Count requests per hctx to improve performance"
author Jan Kara <jack@suse.cz>
Mon, 11 Jan 2021 16:47:16 +0000 (17:47 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 25 Jan 2021 01:19:46 +0000 (18:19 -0700)
This reverts commit b445547ec1bbd3e7bf4b1c142550942f70527d95.

Since both mq-deadline and BFQ completely ignore the hctx they are
passed to their dispatch function and dispatch whatever request they
deem fit, checking whether any request for a particular hctx is queued
is just pointless: we'll very likely get a request from a different
hctx anyway. In the following commit we'll deal with lock contention
in these IO schedulers in the presence of multiple HW queues in a
different way.
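
To make the mismatch concrete, below is a minimal userspace sketch (an
illustrative analogy, not kernel code; hctx, insert_rq() and has_work()
are simplified stand-ins for struct blk_mq_hw_ctx, dd_insert_request()
and dd_has_work()). A request inserted via hctx A goes onto the single
scheduler-wide list, but only A's counter is bumped, so the early-out
removed by this revert makes hctx B claim it has no work even though a
dispatch from B would happily return that request:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct hctx {
          atomic_int elevator_queued;     /* the reverted per-hctx count */
  };

  static int shared_queue;                /* stand-in for dd->fifo_list:
                                             holds requests from all hctxs */

  static void insert_rq(struct hctx *h)
  {
          shared_queue++;                 /* request joins the shared list */
          atomic_fetch_add(&h->elevator_queued, 1); /* counted per hctx */
  }

  static bool has_work(struct hctx *h)
  {
          if (atomic_load(&h->elevator_queued) == 0)
                  return false;           /* the reverted early-out */
          return shared_queue > 0;
  }

  int main(void)
  {
          struct hctx a = { 0 }, b = { 0 };

          insert_rq(&a);                  /* request queued via hctx A */
          printf("has_work(A) = %d\n", has_work(&a)); /* prints 1 */
          printf("has_work(B) = %d\n", has_work(&b)); /* prints 0, despite
                                                         a pending request */
          return 0;
  }

Since dispatch pulls from the shared list regardless of which hctx it
is called on, the counter filters nothing useful and only adds two
atomic operations per request.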

Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bfq-iosched.c
block/blk-mq.c
block/mq-deadline.c
include/linux/blk-mq.h

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c045613..b12a416 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4677,9 +4677,6 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 
-       if (!atomic_read(&hctx->elevator_queued))
-               return false;
-
        /*
         * Avoiding lock: a race on bfqd->busy_queues should cause at
         * most a call to dispatch for nothing
@@ -5597,7 +5594,6 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                bfq_insert_request(hctx, rq, at_head);
-               atomic_inc(&hctx->elevator_queued);
        }
 }
 
@@ -5965,7 +5961,6 @@ static void bfq_finish_requeue_request(struct request *rq)
 
                bfq_completed_request(bfqq, bfqd);
                bfq_finish_requeue_request_body(bfqq);
-               atomic_dec(&rq->mq_hctx->elevator_queued);
 
                spin_unlock_irqrestore(&bfqd->lock, flags);
        } else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 74b17b3..1af6b8a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2653,7 +2653,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
                goto free_hctx;
 
        atomic_set(&hctx->nr_active, 0);
-       atomic_set(&hctx->elevator_queued, 0);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
        hctx->numa_node = node;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 800ac90..b57470e 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -386,8 +386,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);
-       if (rq)
-               atomic_dec(&rq->mq_hctx->elevator_queued);
 
        return rq;
 }
@@ -535,7 +533,6 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
-               atomic_inc(&hctx->elevator_queued);
        }
        spin_unlock(&dd->lock);
 }
@@ -582,9 +579,6 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
 
-       if (!atomic_read(&hctx->elevator_queued))
-               return false;
-
        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 6b410da..aabbf68 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,10 +140,6 @@ struct blk_mq_hw_ctx {
         * shared across request queues.
         */
        atomic_t                nr_active;
-       /**
-        * @elevator_queued: Number of queued requests on hctx.
-        */
-       atomic_t                elevator_queued;
 
        /** @cpuhp_online: List to store request if CPU is going to die */
        struct hlist_node       cpuhp_online;