diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index c25c41d..33d34d6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/sbitmap.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -353,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
        }
 }
 
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
-       /*
-        * All of the hardware queues have the same depth, so we can just grab
-        * the shift of the first one.
-        */
-       return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
        struct kyber_queue_data *kqd;
-       unsigned int shift;
        int ret = -ENOMEM;
        int i;
 
@@ -400,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
                kqd->latency_targets[i] = kyber_latency_targets[i];
        }
 
-       shift = kyber_sched_tags_shift(q);
-       kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
        return kqd;
 
 err_buckets:
@@ -458,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
                INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+       struct blk_mq_tags *tags = hctx->sched_tags;
+       unsigned int shift = tags->bitmap_tags->sb.shift;
+
+       kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+       sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
        struct kyber_hctx_data *khd;
        int i;
 
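
The kyber_depth_updated() hunk above moves the async_depth calculation out of init time: it rereads the shift of the scheduler tag sbitmap and hands the result to sbitmap_queue_min_shallow_depth(). Below is a minimal, user-space sketch of just that arithmetic, not kernel code: KYBER_ASYNC_PERCENT is assumed to be 75 here, and the helper name and the shift values in the loop are made up for illustration.

#include <stdio.h>

#define KYBER_ASYNC_PERCENT 75U	/* assumed value; see the real kyber-iosched.c */

/*
 * Same expression as kqd->async_depth in the patch; "shift" stands in for
 * tags->bitmap_tags->sb.shift, i.e. log2 of the bits per sbitmap word.
 */
static unsigned int async_depth_for_shift(unsigned int shift)
{
	return (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
}

int main(void)
{
	unsigned int shift;

	for (shift = 4; shift <= 6; shift++)
		printf("shift %u: word depth %u, async_depth %u\n",
		       shift, 1U << shift, async_depth_for_shift(shift));
	return 0;
}

With shift 6 (64 bits per word) this gives an async_depth of 48, so asynchronous requests can take at most 75% of the shallow depth and synchronous requests are not starved for tags.
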
@@ -502,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
        khd->batching = 0;
 
        hctx->sched_data = khd;
-       sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
-                                       kqd->async_depth);
+       kyber_depth_updated(hctx);
 
        return 0;
 
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
-               blk_mq_sched_request_inserted(rq);
+               trace_block_rq_insert(rq);
                spin_unlock(&kcq->lock);
        }
 }
@@ -1022,6 +1020,7 @@ static struct elevator_type kyber_sched = {
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
+               .depth_updated = kyber_depth_updated,
        },
 #ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
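
The final hunk registers the new helper as the elevator's .depth_updated hook, so the block layer can call back into Kyber whenever the scheduler tag depth changes (for example after nr_requests is rewritten through sysfs) instead of async_depth being computed only once. The sketch below illustrates only that callback-through-ops pattern under stated assumptions; every type and function name in it (fake_hctx, fake_elevator_ops, sketch_update_nr_requests, ...) is hypothetical and the 75% figure is the assumed KYBER_ASYNC_PERCENT, not the real blk-mq API.

#include <stdio.h>

struct fake_hctx {
	unsigned int sched_tag_depth;
	unsigned int async_depth;
};

struct fake_elevator_ops {
	void (*depth_updated)(struct fake_hctx *hctx);
};

/* Plays the role of kyber_depth_updated(): recompute the async limit. */
static void sketch_depth_updated(struct fake_hctx *hctx)
{
	hctx->async_depth = hctx->sched_tag_depth * 75U / 100U;
}

static const struct fake_elevator_ops ops = {
	.depth_updated = sketch_depth_updated,
};

/* Plays the role of a queue-depth update: resize, then notify the scheduler. */
static void sketch_update_nr_requests(struct fake_hctx *hctx, unsigned int nr)
{
	hctx->sched_tag_depth = nr;
	if (ops.depth_updated)
		ops.depth_updated(hctx);
}

int main(void)
{
	struct fake_hctx hctx = { .sched_tag_depth = 64 };

	ops.depth_updated(&hctx);		/* init-time call, as in kyber_init_hctx() */
	printf("depth %u async %u\n", hctx.sched_tag_depth, hctx.async_depth);

	sketch_update_nr_requests(&hctx, 128);	/* runtime resize */
	printf("depth %u async %u\n", hctx.sched_tag_depth, hctx.async_depth);
	return 0;
}

Having kyber_init_hctx() call the same hook (earlier hunk) keeps the init path and the resize path computing the limit in exactly one place.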