diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 6c15f6e..c62b966 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -57,10 +57,8 @@ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
 
-void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 {
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-               return;
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 
        /*
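
Note: with the BLK_MQ_S_SCHED_RESTART test dropped here, the renamed
__blk_mq_sched_restart() relies on its caller having performed that check.
A minimal sketch of a caller-side wrapper, assuming it lives as an inline
helper in blk-mq-sched.h (that header is not part of this diff):

	static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
	{
		/* Only pay for the function call when a restart was marked. */
		if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			__blk_mq_sched_restart(hctx);
	}
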
@@ -363,7 +361,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        }
 }
 
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
        struct elevator_queue *e = q->elevator;
@@ -389,13 +387,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
-       if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
-               ctx->rq_merged++;
+       if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
                ret = true;
-       }
 
        spin_unlock(&ctx->lock);
-
        return ret;
 }
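
Note: with the ctx->rq_merged bump gone, this is now a pure merge attempt,
and the dropped leading underscores suggest the cheap pre-checks sit with
the caller. A hedged sketch of such a submit-path call site (purely
illustrative, not taken from this diff):

	if (!blk_queue_nomerges(q) && bio_mergeable(bio) &&
	    blk_mq_sched_bio_merge(q, bio, nr_segs))
		return;	/* bio was folded into an existing request */
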
 
@@ -519,78 +514,67 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
 {
-       struct blk_mq_tag_set *set = q->tag_set;
-       int ret;
+       if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+               hctx->sched_tags = q->sched_shared_tags;
+               return 0;
+       }
+
+       hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+                                                   q->nr_requests);
 
-       hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
-                                              set->reserved_tags, set->flags);
        if (!hctx->sched_tags)
                return -ENOMEM;
+       return 0;
+}
 
-       ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-       if (ret) {
-               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-               hctx->sched_tags = NULL;
-       }
-
-       return ret;
+static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
+{
+       blk_mq_free_rq_map(queue->sched_shared_tags);
+       queue->sched_shared_tags = NULL;
 }
 
 /* called in queue's release handler, tagset has gone away */
-static void blk_mq_sched_tags_teardown(struct request_queue *q)
+static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags) {
-                       blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
+                       if (!blk_mq_is_shared_tags(flags))
+                               blk_mq_free_rq_map(hctx->sched_tags);
                        hctx->sched_tags = NULL;
                }
        }
+
+       if (blk_mq_is_shared_tags(flags))
+               blk_mq_exit_sched_shared_tags(q);
 }
 
-static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
+static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 {
        struct blk_mq_tag_set *set = queue->tag_set;
-       int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
-       struct blk_mq_hw_ctx *hctx;
-       int ret, i;
 
        /*
         * Set initial depth at max so that we don't need to reallocate for
         * updating nr_requests.
         */
-       ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
-                                 &queue->sched_breserved_tags,
-                                 MAX_SCHED_RQ, set->reserved_tags,
-                                 set->numa_node, alloc_policy);
-       if (ret)
-               return ret;
-
-       queue_for_each_hw_ctx(queue, hctx, i) {
-               hctx->sched_tags->bitmap_tags =
-                                       &queue->sched_bitmap_tags;
-               hctx->sched_tags->breserved_tags =
-                                       &queue->sched_breserved_tags;
-       }
+       queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
+                                               BLK_MQ_NO_HCTX_IDX,
+                                               MAX_SCHED_RQ);
+       if (!queue->sched_shared_tags)
+               return -ENOMEM;
 
-       blk_mq_tag_update_sched_shared_sbitmap(queue);
+       blk_mq_tag_update_sched_shared_tags(queue);
 
        return 0;
 }
 
-static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
-{
-       sbitmap_queue_free(&queue->sched_bitmap_tags);
-       sbitmap_queue_free(&queue->sched_breserved_tags);
-}
-
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
+       unsigned int i, flags = q->tag_set->flags;
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
-       unsigned int i;
        int ret;
 
        if (!e) {
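
Note: this hunk replaces the per-hctx shared-sbitmap plumbing with a single
queue-wide q->sched_shared_tags map that every hctx's sched_tags simply
points at. The gating predicate is assumed to look roughly like the
following (its real definition would live in block/blk-mq.h, outside this
diff):

	static inline bool blk_mq_is_shared_tags(unsigned int flags)
	{
		/* True when all hw queues of a tag set share one tag space. */
		return flags & BLK_MQ_F_TAG_HCTX_SHARED;
	}
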
@@ -607,21 +591,21 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_DEFAULT_RQ);
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
+       if (blk_mq_is_shared_tags(flags)) {
+               ret = blk_mq_init_sched_shared_tags(q);
                if (ret)
-                       goto err_free_map_and_rqs;
+                       return ret;
        }
 
-       if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-               ret = blk_mq_init_sched_shared_sbitmap(q);
+       queue_for_each_hw_ctx(q, hctx, i) {
+               ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
                if (ret)
                        goto err_free_map_and_rqs;
        }
 
        ret = e->ops.init_sched(q, e);
        if (ret)
-               goto err_free_sbitmap;
+               goto err_free_map_and_rqs;
 
        blk_mq_debugfs_register_sched(q);
 
@@ -641,12 +625,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 
        return 0;
 
-err_free_sbitmap:
-       if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
-               blk_mq_exit_sched_shared_sbitmap(q);
 err_free_map_and_rqs:
        blk_mq_sched_free_rqs(q);
-       blk_mq_sched_tags_teardown(q);
+       blk_mq_sched_tags_teardown(q, flags);
+
        q->elevator = NULL;
        return ret;
 }
@@ -660,9 +642,15 @@ void blk_mq_sched_free_rqs(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               if (hctx->sched_tags)
-                       blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+       if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+               blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
+                               BLK_MQ_NO_HCTX_IDX);
+       } else {
+               queue_for_each_hw_ctx(q, hctx, i) {
+                       if (hctx->sched_tags)
+                               blk_mq_free_rqs(q->tag_set,
+                                               hctx->sched_tags, i);
+               }
        }
 }
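
Note: in the shared case the request pages hang off the one queue-wide map,
so they are freed once under a sentinel hctx index instead of once per hw
queue. The sentinel is assumed to be defined along these lines (actual
definition expected in block/blk-mq.h):

	#define BLK_MQ_NO_HCTX_IDX	(-1U)
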
 
@@ -683,8 +671,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
-       blk_mq_sched_tags_teardown(q);
-       if (blk_mq_is_sbitmap_shared(flags))
-               blk_mq_exit_sched_shared_sbitmap(q);
+       blk_mq_sched_tags_teardown(q, flags);
        q->elevator = NULL;
 }
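
Note: threading the tag_set flags into blk_mq_sched_tags_teardown() lets one
helper cover both modes, which is also why blk_mq_free_rq_map() is called
with a single argument throughout this diff. Its updated prototype is
assumed to be:

	/* Assumed single-argument prototype, matching the calls above. */
	void blk_mq_free_rq_map(struct blk_mq_tags *tags);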