blk-stat: Optimise blk_stat_add()
author	Pavel Begunkov <asml.silence@gmail.com>
	Mon, 7 Oct 2019 21:16:51 +0000 (00:16 +0300)
committer	Jens Axboe <axboe@kernel.dk>
	Tue, 8 Oct 2019 03:19:10 +0000 (21:19 -0600)
blk_stat_add() calls {get,put}_cpu_ptr() in a loop, which entails the
overhead of disabling/enabling preemption on every iteration. The loop
is under RCU (i.e. short) anyway, so do get_cpu() once in advance.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
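
The win comes from hoisting the preemption toggle out of the loop. A
minimal sketch of the two forms, assuming the usual semantics of the
kernel's percpu helpers (get_cpu_ptr() brackets each access with a
preempt_disable()/preempt_enable() pair, while get_cpu() disables
preemption once and returns the current CPU id):

	/* Before: preemption toggled once per callback iteration. */
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		/* bucket selection elided */
		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];	/* preempt_disable() */
		blk_rq_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);			/* preempt_enable() */
	}

	/* After: one disable/enable pair covers the whole short loop. */
	cpu = get_cpu();		/* preempt_disable() + current CPU id */
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		/* bucket selection elided */
		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];	/* no toggle */
		blk_rq_stat_add(stat, value);
	}
	put_cpu();			/* preempt_enable() */

Pinning the CPU once is safe here because the statistics only need to be
added on *some* CPU's percpu slot, not necessarily the one the task ends
up running on afterwards.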
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 940f15d..7da302f 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now)
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
-       int bucket;
+       int bucket, cpu;
        u64 value;
 
        value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now)
        blk_throtl_stat_add(rq, value);
 
        rcu_read_lock();
+       cpu = get_cpu();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (!blk_stat_is_active(cb))
                        continue;
@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now)
                if (bucket < 0)
                        continue;
 
-               stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
+               stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
                blk_rq_stat_add(stat, value);
-               put_cpu_ptr(cb->cpu_stat);
        }
+       put_cpu();
        rcu_read_unlock();
 }
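
For reference, blk_stat_add() as it reads after the patch, stitched
together from the hunks above. The bucket selection falls between the
second and third hunks and is not shown in this diff, so the
cb->bucket_fn(rq) call below is an assumption based on the surrounding
context:

	void blk_stat_add(struct request *rq, u64 now)
	{
		struct request_queue *q = rq->q;
		struct blk_stat_callback *cb;
		struct blk_rq_stat *stat;
		int bucket, cpu;
		u64 value;

		value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

		blk_throtl_stat_add(rq, value);

		rcu_read_lock();
		cpu = get_cpu();	/* disable preemption once for the whole loop */
		list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
			if (!blk_stat_is_active(cb))
				continue;

			bucket = cb->bucket_fn(rq);	/* assumed: sits between hunks */
			if (bucket < 0)
				continue;

			stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
			blk_rq_stat_add(stat, value);
		}
		put_cpu();
		rcu_read_unlock();
	}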