diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32d82e2..9c92053 100644
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-           !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               atomic_inc(&hctx->tags->active_queues);
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               struct request_queue *q = hctx->queue;
+               struct blk_mq_tag_set *set = q->tag_set;
+
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
+                   !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                       atomic_inc(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+                   !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       atomic_inc(&hctx->tags->active_queues);
+       }
 
        return true;
 }
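
The counters bumped above (per-hctx active_queues, or the per-set active_queues_shared_sbitmap when the sbitmap is shared) feed the fair-share test applied before a driver tag is taken, via hctx_may_queue() as called further down in __blk_mq_get_tag(). A minimal sketch of that sharing rule, with simplified parameters and not the in-tree implementation:

	/*
	 * Illustrative only: each active queue gets roughly an equal slice of
	 * the tag space, with a small floor so shallow devices still make
	 * progress.
	 */
	static bool may_queue_sketch(unsigned int bitmap_depth,
				     unsigned int active_queues,
				     unsigned int my_active_requests)
	{
		unsigned int depth;

		if (!active_queues)
			return true;

		depth = bitmap_depth / active_queues;
		if (depth < 4)
			depth = 4;

		return my_active_requests < depth;
	}
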
@@ -35,9 +44,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
  */
 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
-       sbitmap_queue_wake_all(&tags->bitmap_tags);
+       sbitmap_queue_wake_all(tags->bitmap_tags);
        if (include_reserve)
-               sbitmap_queue_wake_all(&tags->breserved_tags);
+               sbitmap_queue_wake_all(tags->breserved_tags);
 }
 
 /*
@@ -47,11 +56,19 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_tags *tags = hctx->tags;
-
-       if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               return;
-
-       atomic_dec(&tags->active_queues);
+       struct request_queue *q = hctx->queue;
+       struct blk_mq_tag_set *set = q->tag_set;
+
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
+                                       &q->queue_flags))
+                       return;
+               atomic_dec(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       return;
+               atomic_dec(&tags->active_queues);
+       }
 
        blk_mq_tag_wakeup_all(tags, false);
 }
@@ -59,7 +76,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
 {
-       if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
+       if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
+                       !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;
 
        if (data->shallow_depth)
@@ -82,10 +100,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
-               bt = &tags->breserved_tags;
+               bt = tags->breserved_tags;
                tag_offset = 0;
        } else {
-               bt = &tags->bitmap_tags;
+               bt = tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }
 
@@ -131,9 +149,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                                                data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
-                       bt = &tags->breserved_tags;
+                       bt = tags->breserved_tags;
                else
-                       bt = &tags->bitmap_tags;
+                       bt = tags->bitmap_tags;
 
                /*
                 * If destination hw queue is changed, fake wake up on
@@ -167,10 +185,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                const int real_tag = tag - tags->nr_reserved_tags;
 
                BUG_ON(real_tag >= tags->nr_tags);
-               sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
+               sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
-               sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
+               sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
        }
 }
 
@@ -197,7 +215,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
-       if (rq && rq->q == hctx->queue)
+       if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
                return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
 }
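
The additional rq->mq_hctx == hctx test tightens the filter for the shared-sbitmap case: every hctx of the tag set now walks the same bit space, so matching on the request queue alone no longer ties a request to the hardware queue the callback is being run for. Written out as a standalone predicate (the helper name is hypothetical; the condition is the one used above):

	/* Hypothetical helper: should bt_iter() hand this request to the callback? */
	static bool rq_matches_hctx(struct request *rq, struct blk_mq_hw_ctx *hctx)
	{
		/*
		 * rq may be NULL because the tag bit is set before ->rqs[] is
		 * assigned; it must also belong to this queue and, with a
		 * shared sbitmap, to this particular hctx.
		 */
		return rq && rq->q == hctx->queue && rq->mq_hctx == hctx;
	}
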
@@ -298,9 +316,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);
 
        if (tags->nr_reserved_tags)
-               bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
+               bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
-       bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
+       bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
 }
 
 /**
@@ -398,9 +416,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
-        * racing with it. __blk_mq_update_nr_hw_queues() uses
-        * synchronize_rcu() to ensure this function left the critical section
-        * below.
+        * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;
@@ -416,8 +432,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        continue;
 
                if (tags->nr_reserved_tags)
-                       bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
-               bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+                       bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
+               bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
 }
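
The trimmed comment above still leans on the same guard: holding a q_usage_counter reference keeps the queue from becoming fully frozen, and __blk_mq_update_nr_hw_queues() only reshapes queue_hw_ctx while the queue is frozen. A minimal sketch of that pattern around a hardware-queue walk (the callback is hypothetical):

	/* Sketch of the q_usage_counter guard used by the iterator above. */
	static void walk_hw_queues_sketch(struct request_queue *q,
					  void (*cb)(struct blk_mq_hw_ctx *hctx))
	{
		struct blk_mq_hw_ctx *hctx;
		int i;

		/*
		 * If no reference can be taken the queue is already drained and
		 * on its way out; otherwise the reference prevents a concurrent
		 * freeze (and hence a concurrent nr_hw_queues update) from
		 * completing while we walk.
		 */
		if (!percpu_ref_tryget(&q->q_usage_counter))
			return;

		queue_for_each_hw_ctx(q, hctx, i)
			cb(hctx);

		blk_queue_exit(q);
	}
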
@@ -429,30 +445,64 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                                       node);
 }
 
-static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
-                                                  int node, int alloc_policy)
+static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
+                                  int node, int alloc_policy)
 {
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
 
-       if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
-               goto free_tags;
-       if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
-                    node))
+       if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
+               return -ENOMEM;
+       if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
+                    round_robin, node))
                goto free_bitmap_tags;
 
-       return tags;
+       tags->bitmap_tags = &tags->__bitmap_tags;
+       tags->breserved_tags = &tags->__breserved_tags;
+
+       return 0;
 free_bitmap_tags:
-       sbitmap_queue_free(&tags->bitmap_tags);
-free_tags:
-       kfree(tags);
-       return NULL;
+       sbitmap_queue_free(&tags->__bitmap_tags);
+       return -ENOMEM;
+}
+
+int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
+{
+       unsigned int depth = set->queue_depth - set->reserved_tags;
+       int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
+       bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
+       int i, node = set->numa_node;
+
+       if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
+               return -ENOMEM;
+       if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
+                    round_robin, node))
+               goto free_bitmap_tags;
+
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               struct blk_mq_tags *tags = set->tags[i];
+
+               tags->bitmap_tags = &set->__bitmap_tags;
+               tags->breserved_tags = &set->__breserved_tags;
+       }
+
+       return 0;
+free_bitmap_tags:
+       sbitmap_queue_free(&set->__bitmap_tags);
+       return -ENOMEM;
+}
+
+void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
+{
+       sbitmap_queue_free(&set->__bitmap_tags);
+       sbitmap_queue_free(&set->__breserved_tags);
 }
 
 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
-                                    int node, int alloc_policy)
+                                    int node, unsigned int flags)
 {
+       int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
        struct blk_mq_tags *tags;
 
        if (total_tags > BLK_MQ_TAG_MAX) {
@@ -467,13 +517,22 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
 
-       return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
+       if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
+               return tags;
+
+       if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
+               kfree(tags);
+               return NULL;
+       }
+       return tags;
 }
 
-void blk_mq_free_tags(struct blk_mq_tags *tags)
+void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
 {
-       sbitmap_queue_free(&tags->bitmap_tags);
-       sbitmap_queue_free(&tags->breserved_tags);
+       if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
+               sbitmap_queue_free(tags->bitmap_tags);
+               sbitmap_queue_free(tags->breserved_tags);
+       }
        kfree(tags);
 }
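
From a driver's point of view the only visible knob in all of this is the tag-set flag; the bitmap plumbing above stays internal to blk-mq. A hedged sketch of a driver opting in (the ops table and numbers are hypothetical):

	/* Hypothetical driver setup: request one set-wide tag bitmap. */
	static int mydrv_init_tag_set(struct blk_mq_tag_set *set)
	{
		memset(set, 0, sizeof(*set));
		set->ops		= &mydrv_mq_ops;	/* hypothetical ops table */
		set->nr_hw_queues	= 4;
		set->queue_depth	= 128;
		set->numa_node		= NUMA_NO_NODE;
		/* All four hw queues share one 128-deep sbitmap instead of
		 * owning 128 tags each. */
		set->flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_TAG_HCTX_SHARED;

		return blk_mq_alloc_tag_set(set);
	}
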
 
@@ -492,6 +551,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
+               /* Only sched tags can grow, so clear HCTX_SHARED flag  */
+               unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
                struct blk_mq_tags *new;
                bool ret;
 
@@ -506,30 +567,35 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                        return -EINVAL;
 
                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
-                               tags->nr_reserved_tags);
+                               tags->nr_reserved_tags, flags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
-                       blk_mq_free_rq_map(new);
+                       blk_mq_free_rq_map(new, flags);
                        return -ENOMEM;
                }
 
                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
-               blk_mq_free_rq_map(*tagsptr);
+               blk_mq_free_rq_map(*tagsptr, flags);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
-               sbitmap_queue_resize(&tags->bitmap_tags,
+               sbitmap_queue_resize(tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }
 
        return 0;
 }
 
+void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
+{
+       sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
+}
+
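
With one set-wide bitmap there is also only one resize to perform when the queue depth changes, instead of one per hardware queue. A simplified, hypothetical caller to show the intent (the real depth-update path goes through blk_mq_tag_update_depth() per hctx, shown above):

	/* Illustrative only: shared vs. per-hctx resize on a depth change. */
	static void update_depth_sketch(struct blk_mq_tag_set *set, unsigned int depth)
	{
		int i;

		if (set->flags & BLK_MQ_F_TAG_HCTX_SHARED) {
			/* One shared bitmap, so resize it exactly once. */
			blk_mq_tag_resize_shared_sbitmap(set, depth);
			return;
		}

		/* Otherwise each hctx owns its bitmap and is resized on its own. */
		for (i = 0; i < set->nr_hw_queues; i++)
			sbitmap_queue_resize(set->tags[i]->bitmap_tags,
					     depth - set->tags[i]->nr_reserved_tags);
	}
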
 /**
  * blk_mq_unique_tag() - return a tag that is unique queue-wide
  * @rq: request for which to compute a unique tag