diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e55a683..68ac23d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 
+/*
+ * Recalculate the wakeup batch when the tag set is shared by multiple hctxs.
+ */
+static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+               unsigned int users)
+{
+       if (!users)
+               return;
+
+       sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
+                       users);
+       sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
+                       users);
+}
+
 /*
  * If a previously inactive queue goes active, bump the active user count.
  * We need to do this before trying to allocate a driver tag, so that even
  * if we fail to get a tag the first time, the other shared-tag users can
  * still reserve budget for it.
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
+       unsigned int users;
+
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
 
-               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
-                   !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
-                       atomic_inc(&hctx->tags->active_queues);
+               if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+                   test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
+                       return true;
+               }
        } else {
-               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-                   !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-                       atomic_inc(&hctx->tags->active_queues);
+               if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+                   test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
+                       return true;
+               }
        }
 
+       users = atomic_inc_return(&hctx->tags->active_queues);
+
+       blk_mq_update_wake_batch(hctx->tags, users);
+
        return true;
 }
 
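With shared tags, each active queue is limited to roughly nr_tags / active_queues tags, but the sbitmap wakeup batch was still sized for the full depth; a queue whose share is smaller than one batch may never free enough tags to wake a full batch of waiters, which is the hang the helper above addresses. A minimal userspace sketch of the recalculation, assuming the clamp bounds mirror lib/sbitmap.c (lower bound 4, upper bound SBQ_WAKE_BATCH == 8); wake_batch_for() is a hypothetical name:

    #include <stdio.h>

    /* Assumed bounds, mirroring lib/sbitmap.c: min 4, max SBQ_WAKE_BATCH. */
    #define WAKE_BATCH_MIN 4
    #define WAKE_BATCH_MAX 8

    /* Size the wakeup batch to the per-user share of the bitmap depth. */
    static unsigned int wake_batch_for(unsigned int depth, unsigned int users)
    {
            unsigned int batch = (depth + users - 1) / users; /* ceiling */

            if (batch < WAKE_BATCH_MIN)
                    batch = WAKE_BATCH_MIN;
            if (batch > WAKE_BATCH_MAX)
                    batch = WAKE_BATCH_MAX;
            return batch;
    }

    int main(void)
    {
            /* 32 tags shared by 1, 4 and 16 active queues. */
            for (unsigned int users = 1; users <= 16; users *= 4)
                    printf("users=%2u -> wake_batch=%u\n",
                           users, wake_batch_for(32, users));
            return 0;
    }
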
@@ -56,6 +79,7 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_tags *tags = hctx->tags;
+       unsigned int users;
 
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
@@ -68,7 +92,9 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
                        return;
        }
 
-       atomic_dec(&tags->active_queues);
+       users = atomic_dec_return(&tags->active_queues);
+
+       blk_mq_update_wake_batch(tags, users);
 
        blk_mq_tag_wakeup_all(tags, false);
 }
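
Both the busy and idle paths now take the updated count from the same atomic read-modify-write that changes it; an atomic_inc()/atomic_dec() followed by a separate atomic_read() could observe a concurrent transition and recalculate the batch from a stale value. A userspace analogue of the pattern, using C11 stdatomic in place of the kernel's atomic_t (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for tags->active_queues. */
    static atomic_uint active_queues;

    /* Return the value after the increment, like atomic_inc_return(). */
    static unsigned int user_goes_active(void)
    {
            return atomic_fetch_add(&active_queues, 1) + 1;
    }

    /* Return the value after the decrement, like atomic_dec_return(). */
    static unsigned int user_goes_idle(void)
    {
            return atomic_fetch_sub(&active_queues, 1) - 1;
    }

    int main(void)
    {
            printf("users after busy: %u\n", user_goes_active());
            printf("users after idle: %u\n", user_goes_idle());
            return 0;
    }
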
@@ -81,7 +107,7 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                return BLK_MQ_NO_TAG;
 
        if (data->shallow_depth)
-               return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
+               return sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
 }
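
This hunk only follows the sbitmap rename of __sbitmap_queue_get_shallow() to sbitmap_queue_get_shallow(); behaviour is unchanged. The shallow_depth limit caps how far into the bitmap an allocation may search, which I/O schedulers use to keep lower-priority requests from draining the whole tag space. A toy sketch of the idea over a single 64-bit word, with get_tag_shallow() as a hypothetical helper:

    #include <stdio.h>

    /* Allocate only from the first shallow_depth bits of the word. */
    static int get_tag_shallow(unsigned long long *map,
                               unsigned int shallow_depth)
    {
            for (unsigned int bit = 0; bit < shallow_depth; bit++) {
                    if (!(*map & (1ULL << bit))) {
                            *map |= 1ULL << bit;
                            return bit;
                    }
            }
            return -1; /* no free tag within the shallow region */
    }

    int main(void)
    {
            unsigned long long map = 0;

            /* With shallow_depth == 2, the third allocation must fail. */
            printf("%d %d %d\n",
                   get_tag_shallow(&map, 2),
                   get_tag_shallow(&map, 2),
                   get_tag_shallow(&map, 2));
            return 0;
    }
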
@@ -472,7 +498,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv)
 {
        /*
-        * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+        * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
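
In the full function this comment sits just before the walk is guarded by percpu_ref_tryget(&q->q_usage_counter), with the reference dropped when iteration finishes, so a queue frozen for an nr_hw_queues update is simply skipped. A userspace analogue of that tryget pattern, assuming a plain atomic counter where zero means "frozen":

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int usage_counter = 1; /* 0 means the queue is frozen */

    /* Take a reference only if the counter has not dropped to zero. */
    static bool usage_tryget(void)
    {
            int v = atomic_load(&usage_counter);

            while (v > 0) {
                    if (atomic_compare_exchange_weak(&usage_counter, &v, v + 1))
                            return true;
            }
            return false;
    }

    static void usage_put(void)
    {
            atomic_fetch_sub(&usage_counter, 1);
    }

    int main(void)
    {
            if (usage_tryget()) {
                    printf("safe to iterate the hctxs\n");
                    usage_put();
            }
            return 0;
    }
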
@@ -489,7 +515,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                bt_for_each(NULL, q, btags, fn, priv, false);
        } else {
                struct blk_mq_hw_ctx *hctx;
-               int i;
+               unsigned long i;
 
                queue_for_each_hw_ctx(q, hctx, i) {
                        struct blk_mq_tags *tags = hctx->tags;
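
The iterator type changes because queue_for_each_hw_ctx() now walks the xarray-backed hctx_table mentioned in the previous hunk, and xarray indices are unsigned long; an int index would be the wrong width. A toy stand-in for such an iteration macro, with table_for_each() being hypothetical:

    #include <stdio.h>

    struct hctx { unsigned int queue_num; };

    /* Iterate a table whose indices are unsigned long, like an xarray. */
    #define table_for_each(table, nr, pos, i) \
            for ((i) = 0; (i) < (nr) && ((pos) = &(table)[i]); (i)++)

    int main(void)
    {
            struct hctx table[4] = { {0}, {1}, {2}, {3} };
            struct hctx *hctx;
            unsigned long i;

            table_for_each(table, 4, hctx, i)
                    printf("hctx %u at index %lu\n", hctx->queue_num, i);
            return 0;
    }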