blk-mq: don't handle non-flush requests in blk_insert_flush
author     Christoph Hellwig <hch@lst.de>
           Tue, 19 Oct 2021 12:25:53 +0000 (14:25 +0200)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 19 Oct 2021 17:10:09 +0000 (11:10 -0600)
Return to the normal blk_mq_submit_bio flow if the bio did not end up
actually being a flush because the device didn't support it.  Note that
this is basically impossible to hit without special instrumentation,
given that submit_bio_checks usually already clears these flags, so we'd
need a tight race to actually hit this code path.
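
For context, the flag clearing referred to above is the early flush
filtering in submit_bio_checks(); the snippet below is a paraphrased
sketch from a kernel tree of the same era, not part of this patch:

	/*
	 * Paraphrased from submit_bio_checks() (block/blk-core.c): devices
	 * without a volatile write cache have REQ_PREFLUSH and REQ_FUA
	 * stripped from the bio up front, and an empty flush is completed
	 * immediately, so blk_mq_submit_bio normally never sees such a bio
	 * with the flush flags still set.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}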

With this, the call to blk_mq_run_hw_queue for the flush requests can be
removed, given that the actual flush requests are always issued via the
requeue workqueue, which runs the queue unconditionally.
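
For reference, the flush sequencing code issues its requests through the
requeue list; the helper below is a paraphrased sketch of
blk_flush_queue_rq() from block/blk-flush.c of the same era, not part of
this patch.  The final 'true' kicks the requeue work, which runs the
hardware queues after inserting the request, which is why the caller no
longer needs an explicit blk_mq_run_hw_queue():

	static void blk_flush_queue_rq(struct request *rq, bool add_front)
	{
		/*
		 * kick_requeue_list == true schedules the requeue work,
		 * which inserts the request and then runs the hw queues,
		 * so the flush path never depends on the submitter
		 * running them.
		 */
		blk_mq_add_to_requeue_list(rq, add_front, true);
	}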

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq.c
block/blk.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4201728..8e364bd 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
@@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq)
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
-               return;
+               return true;
        }
 
        BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq)
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
-           !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-               blk_mq_request_bypass_insert(rq, false, false);
-               return;
-       }
+           !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
+               return false;
 
        /*
         * @rq should go through flush machinery.  Mark it part of flush
@@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq)
        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
+
+       return true;
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 71ab752..3481a87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2532,14 +2532,12 @@ void blk_mq_submit_bio(struct bio *bio)
                return;
        }
 
-       if (unlikely(is_flush_fua)) {
-               struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-               /* Bypass scheduler for flush requests */
-               blk_insert_flush(rq);
-               blk_mq_run_hw_queue(hctx, true);
-       } else if (plug && (q->nr_hw_queues == 1 ||
-                  blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
-                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+       if (is_flush_fua && blk_insert_flush(rq))
+               return;
+
+       if (plug && (q->nr_hw_queues == 1 ||
+           blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
+           q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
                 * we know the driver uses bd->last in a smart fashion.
diff --git a/block/blk.h b/block/blk.h
index b9729c1..6a039e6 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e);