block: clean up blk_mq_submit_bio() merging
author    Pavel Begunkov <asml.silence@gmail.com>
          Wed, 20 Oct 2021 19:00:49 +0000 (20:00 +0100)
committer Jens Axboe <axboe@kernel.dk>
          Thu, 21 Oct 2021 14:27:17 +0000 (08:27 -0600)
Combine the blk_mq_sched_bio_merge() and blk_attempt_plug_merge() calls in
blk_mq_submit_bio() under a common if, so we don't do the no-merge checks twice.

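After this change the merge section of blk_mq_submit_bio() reads roughly as
below (a simplified sketch taken straight from the diff; surrounding code and
error handling omitted):

	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
		/* first try to merge into a request held in the current plug */
		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
			goto queue_exit;
		/* otherwise try a scheduler / software-queue level merge */
		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
			goto queue_exit;
	}
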
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/daedc90d4029a5d1d73344771632b1faca3aaf81.1634755800.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e85b755..5b259fd 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -361,7 +361,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        }
 }
 
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
        struct elevator_queue *e = q->elevator;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9883610..25d1034 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -12,7 +12,7 @@ void blk_mq_sched_assign_ioc(struct request *rq);
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                                   struct list_head *free);
@@ -42,16 +42,6 @@ static inline bool bio_mergeable(struct bio *bio)
        return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
 }
 
-static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-               unsigned int nr_segs)
-{
-       if (blk_queue_nomerges(q) || !bio_mergeable(bio))
-               return false;
-
-       return __blk_mq_sched_bio_merge(q, bio, nr_segs);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 101466e..d04ee72 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2481,7 +2481,6 @@ void blk_mq_submit_bio(struct bio *bio)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        const int is_sync = op_is_sync(bio->bi_opf);
-       const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct request *rq;
        struct blk_plug *plug;
        bool same_queue_rq = false;
@@ -2495,12 +2494,12 @@ void blk_mq_submit_bio(struct bio *bio)
        if (!bio_integrity_prep(bio))
                goto queue_exit;
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-               goto queue_exit;
-
-       if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-               goto queue_exit;
+       if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
+               if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
+                       goto queue_exit;
+               if (blk_mq_sched_bio_merge(q, bio, nr_segs))
+                       goto queue_exit;
+       }
 
        rq_qos_throttle(q, bio);
 
@@ -2543,7 +2542,7 @@ void blk_mq_submit_bio(struct bio *bio)
                return;
        }
 
-       if (is_flush_fua && blk_insert_flush(rq))
+       if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
                return;
 
        if (plug && (q->nr_hw_queues == 1 ||
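
A note on the dropped is_flush_fua local: bio_mergeable() already rejects
flush/FUA bios, because REQ_PREFLUSH and REQ_FUA are (to the best of my
recollection) part of REQ_NOMERGE_FLAGS, so the explicit !is_flush_fua test in
the merge gate was redundant and op_is_flush() is now evaluated only at its
single remaining use.  A sketch of the helpers involved (bio_mergeable() as
shown in the blk-mq-sched.h hunk above; the REQ_NOMERGE_FLAGS definition is
reproduced from memory and should be treated as an assumption):

	/* include/linux/blk_types.h (from memory): flush/FUA bios never merge */
	#define REQ_NOMERGE_FLAGS \
		(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

	/* context quoted from the blk-mq-sched.h hunk above */
	static inline bool bio_mergeable(struct bio *bio)
	{
		return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
	}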