block: only allocate poll_stats if there's a user of them
author Jens Axboe <axboe@kernel.dk>
Sat, 13 Nov 2021 21:03:26 +0000 (14:03 -0700)
committer Jens Axboe <axboe@kernel.dk>
Mon, 29 Nov 2021 13:38:35 +0000 (06:38 -0700)
The poll_stat array is essentially never used, yet it accounts for about
a third of the total queue size. Allocate it when needed instead of
embedding it in the queue.

Kill the queue flag for this while we're at it, since we can now just
check the assigned pointer.
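
For illustration only, a minimal user-space sketch of the pattern this
boils down to (simplified stand-in names, C11 atomics instead of the
kernel's kcalloc()/cmpxchg(); not the patch itself): allocate the buffer
on first use, let a compare-and-swap on the pointer decide any race
between concurrent enablers, free the loser's copy, and treat a non-NULL
pointer as "stats enabled".

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	#define NR_BUCKETS 16	/* stand-in for BLK_MQ_POLL_STATS_BKTS */

	struct rq_stat { unsigned long nr_samples, total; };

	struct queue {
		_Atomic(struct rq_stat *) poll_stat;	/* NULL until enabled */
	};

	/* Return true if stats were already enabled, false if we just enabled them. */
	static bool stats_alloc_enable(struct queue *q)
	{
		struct rq_stat *expected = NULL;
		struct rq_stat *stat = calloc(NR_BUCKETS, sizeof(*stat));

		if (!stat)
			return false;

		/* Lost the race: another caller installed a buffer first. */
		if (!atomic_compare_exchange_strong(&q->poll_stat, &expected, stat)) {
			free(stat);
			return true;
		}
		return false;
	}

	/* The queue flag is gone: a non-NULL pointer means "stats enabled". */
	static bool stats_enabled(struct queue *q)
	{
		return atomic_load(&q->poll_stat) != NULL;
	}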

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-stat.c
block/blk-stat.h
block/blk-sysfs.c
include/linux/blkdev.h

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4f2cf83..f4022b1 100644
@@ -122,7 +122,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
-       QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(QUIESCED),
        QUEUE_FLAG_NAME(PCI_P2PDMA),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3af88ff..7cd4084 100644
@@ -4581,11 +4581,10 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 /* Enable polling stats and return whether they were already enabled. */
 static bool blk_poll_stats_enable(struct request_queue *q)
 {
-       if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
-           blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
+       if (q->poll_stat)
                return true;
-       blk_stat_add_callback(q, q->poll_cb);
-       return false;
+
+       return blk_stats_alloc_enable(q);
 }
 
 static void blk_mq_poll_stats_start(struct request_queue *q)
@@ -4594,8 +4593,7 @@ static void blk_mq_poll_stats_start(struct request_queue *q)
         * We don't arm the callback if polling stats are not enabled or the
         * callback is already active.
         */
-       if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
-           blk_stat_is_active(q->poll_cb))
+       if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
                return;
 
        blk_stat_activate_msecs(q->poll_cb, 100);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index ae3dd1f..efb2a80 100644
@@ -219,3 +219,21 @@ void blk_free_queue_stats(struct blk_queue_stats *stats)
 
        kfree(stats);
 }
+
+bool blk_stats_alloc_enable(struct request_queue *q)
+{
+       struct blk_rq_stat *poll_stat;
+
+       poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
+                               GFP_ATOMIC);
+       if (!poll_stat)
+               return false;
+
+       if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
+               kfree(poll_stat);
+               return true;
+       }
+
+       blk_stat_add_callback(q, q->poll_cb);
+       return false;
+}
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17b47a8..58f029a 100644
@@ -64,6 +64,7 @@ struct blk_stat_callback {
 
 struct blk_queue_stats *blk_alloc_queue_stats(void);
 void blk_free_queue_stats(struct blk_queue_stats *);
+bool blk_stats_alloc_enable(struct request_queue *q);
 
 void blk_stat_add(struct request *rq, u64 now);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cd75b0f..c079be1 100644
@@ -785,11 +785,12 @@ static void blk_release_queue(struct kobject *kobj)
 
        might_sleep();
 
-       if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
+       if (q->poll_stat)
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);
 
        blk_free_queue_stats(q->stats);
+       kfree(q->poll_stat);
 
        blk_exit_queue(q);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bd4370b..74118e6 100644
@@ -267,7 +267,7 @@ struct request_queue {
        int                     poll_nsec;
 
        struct blk_stat_callback        *poll_cb;
-       struct blk_rq_stat      poll_stat[BLK_MQ_POLL_STATS_BKTS];
+       struct blk_rq_stat      *poll_stat;
 
        struct timer_list       timeout;
        struct work_struct      timeout_work;
@@ -397,7 +397,6 @@ struct request_queue {
 #define QUEUE_FLAG_FUA         18      /* device supports FUA writes */
 #define QUEUE_FLAG_DAX         19      /* device supports DAX */
 #define QUEUE_FLAG_STATS       20      /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS  21      /* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  22      /* queue has been registered to a disk */
 #define QUEUE_FLAG_QUIESCED    24      /* queue has been quiesced */
 #define QUEUE_FLAG_PCI_P2PDMA  25      /* device supports PCI p2p requests */