Merge tag 'for-5.19/block-2022-06-02' of git://git.kernel.dk/linux-block
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 30f4565..30e4bdc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1132,14 +1132,7 @@ void blk_mq_start_request(struct request *rq)
        trace_block_rq_issue(rq);
 
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
-               u64 start_time;
-#ifdef CONFIG_BLK_CGROUP
-               if (rq->bio)
-                       start_time = bio_issue_time(&rq->bio->bi_issue);
-               else
-#endif
-                       start_time = ktime_get_ns();
-               rq->io_start_time_ns = start_time;
+               rq->io_start_time_ns = ktime_get_ns();
                rq->stats_sectors = blk_rq_sectors(rq);
                rq->rq_flags |= RQF_STATS;
                rq_qos_issue(q, rq);
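
With the cgroup-only bio_issue_time() source gone, the issue stamp under QUEUE_FLAG_STATS is always ktime_get_ns(), and the completion side just subtracts it from the current time (blk_stat_add() floors a negative delta at zero). A minimal userspace model of that bookkeeping — struct model_rq and the helpers are illustrative stand-ins, not kernel code:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the request fields stamped at issue time. */
struct model_rq {
	uint64_t io_start_time_ns;
	uint64_t stats_sectors;
};

/* CLOCK_MONOTONIC as a userspace analogue of ktime_get_ns(). */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	struct model_rq rq;

	/* Issue: stamp the start time and capture the request size. */
	rq.io_start_time_ns = now_ns();
	rq.stats_sectors = 8;

	/* Completion: the latency sample is now - start, floored at zero. */
	uint64_t now = now_ns();
	uint64_t lat = now >= rq.io_start_time_ns ?
		       now - rq.io_start_time_ns : 0;

	printf("%llu sectors, %llu ns\n",
	       (unsigned long long)rq.stats_sectors,
	       (unsigned long long)lat);
	return 0;
}
```
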
@@ -1177,6 +1170,61 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
        complete(waiting);
 }
 
+/*
+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on the plug queue when it
+ * holds requests for multiple queues. This is important for md arrays
+ * to benefit from merging requests.
+ */
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+       if (plug->multiple_queues)
+               return BLK_MAX_REQUEST_COUNT * 2;
+       return BLK_MAX_REQUEST_COUNT;
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+       struct request *last = rq_list_peek(&plug->mq_list);
+
+       if (!plug->rq_count) {
+               trace_block_plug(rq->q);
+       } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+                  (!blk_queue_nomerges(rq->q) &&
+                   blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+               blk_mq_flush_plug_list(plug, false);
+               trace_block_plug(rq->q);
+       }
+
+       if (!plug->multiple_queues && last && last->q != rq->q)
+               plug->multiple_queues = true;
+       if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+               plug->has_elevator = true;
+       rq->rq_next = NULL;
+       rq_list_add(&plug->mq_list, rq);
+       plug->rq_count++;
+}
+
+static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
+               rq_end_io_fn *done, bool use_plug)
+{
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
+
+       rq->end_io = done;
+
+       blk_account_io_start(rq);
+
+       if (use_plug && current->plug) {
+               blk_add_rq_to_plug(current->plug, rq);
+               return;
+       }
+       /*
+        * Don't check the dying flag for MQ because the request won't
+        * be reused after the dying flag is set.
+        */
+       blk_mq_sched_insert_request(rq, at_head, true, false);
+}
+
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:                request to insert
@@ -1192,18 +1241,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 {
-       WARN_ON(irqs_disabled());
-       WARN_ON(!blk_rq_is_passthrough(rq));
-
-       rq->end_io = done;
+       __blk_execute_rq_nowait(rq, at_head, done, true);
-
-       blk_account_io_start(rq);
-
-       /*
-        * don't check dying flag for MQ because the request won't
-        * be reused after dying flag is set
-        */
-       blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
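
blk_execute_rq_nowait() now funnels through __blk_execute_rq_nowait() with use_plug=true, so a passthrough submitter that runs inside a plugged section gets batching for free. A hedged sketch of such a caller — submit_batch(), rqs[] and my_end_io are assumptions for illustration, not part of this patch; blk_start_plug()/blk_finish_plug() are the existing block-layer plug API:

```c
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Illustrative caller: submit several pre-allocated passthrough
 * requests under one plug so they reach the driver as a batch when
 * the plug is flushed.
 */
static void submit_batch(struct request **rqs, int n, rq_end_io_fn *my_end_io)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* makes current->plug non-NULL */
	for (i = 0; i < n; i++)
		/* queued onto plug->mq_list via blk_add_rq_to_plug() */
		blk_execute_rq_nowait(rqs[i], false, my_end_io);
	blk_finish_plug(&plug);		/* one flush for the whole batch */
}
```

blk_add_rq_to_plug() still flushes early when the plug hits blk_plug_max_rq_count() or the last request reaches BLK_PLUG_FLUSH_SIZE, so the plugged list stays bounded.
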
@@ -1241,8 +1280,13 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long hang_check;
 
        rq->end_io_data = &wait;
-       blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
+       /*
+        * iopoll requires the request to be submitted to the driver, so
+        * the plug can't be used here.
+        */
+       __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
+                       !blk_rq_is_poll(rq));
 
        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
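
The synchronous wrapper is the classic completion pattern: blk_end_sync_rq() is installed as the end_io handler and fires a stack completion that blk_execute_rq() sleeps on, waking periodically when a hung-task timeout is configured so the hang checker is not tripped by a long I/O. A self-contained userspace model of the same pattern, with a pthread condition variable standing in for the kernel completion (all names here are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

/* Userspace model of DECLARE_COMPLETION_ONSTACK / complete / wait. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Plays the role of blk_end_sync_rq(): end_io fires the completion. */
static void *async_complete(void *arg)
{
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion wait = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t t;

	pthread_create(&t, NULL, async_complete, &wait);	/* "submit" */
	wait_for_completion(&wait);	/* sleep until end_io runs */
	pthread_join(&t, NULL);
	puts("request completed");
	return 0;
}
```
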
@@ -2683,40 +2727,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-/*
- * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
- * queues. This is important for md arrays to benefit from merging
- * requests.
- */
-static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
-{
-       if (plug->multiple_queues)
-               return BLK_MAX_REQUEST_COUNT * 2;
-       return BLK_MAX_REQUEST_COUNT;
-}
-
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
-       struct request *last = rq_list_peek(&plug->mq_list);
-
-       if (!plug->rq_count) {
-               trace_block_plug(rq->q);
-       } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
-                  (!blk_queue_nomerges(rq->q) &&
-                   blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-               blk_mq_flush_plug_list(plug, false);
-               trace_block_plug(rq->q);
-       }
-
-       if (!plug->multiple_queues && last && last->q != rq->q)
-               plug->multiple_queues = true;
-       if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
-               plug->has_elevator = true;
-       rq->rq_next = NULL;
-       rq_list_add(&plug->mq_list, rq);
-       plug->rq_count++;
-}
-
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
                                     struct bio *bio, unsigned int nr_segs)
 {
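
The two helpers removed here are the ones re-added earlier in the file: a plug holds at most BLK_MAX_REQUEST_COUNT requests, twice that once it spans multiple queues (so stacked devices such as md keep a merge window), and it flushes early when the newest request has already grown to BLK_PLUG_FLUSH_SIZE. A standalone model of just the flush decision — the constant values mirror block/blk.h as an assumption, so verify them against your tree:

```c
#include <stdbool.h>
#include <stdio.h>

/* Assumed to mirror block/blk.h for this kernel era; check your tree. */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

struct model_plug {
	unsigned short rq_count;
	bool multiple_queues;
};

/* Same policy as blk_plug_max_rq_count(): double the cap for md-style stacks. */
static unsigned short max_rq_count(const struct model_plug *plug)
{
	return plug->multiple_queues ? BLK_MAX_REQUEST_COUNT * 2
				     : BLK_MAX_REQUEST_COUNT;
}

/*
 * The flush decision from blk_add_rq_to_plug(), assuming the queue
 * allows merges (i.e. !blk_queue_nomerges()).
 */
static bool should_flush(const struct model_plug *plug, unsigned int last_bytes)
{
	if (!plug->rq_count)
		return false;	/* empty plug: nothing to flush */
	return plug->rq_count >= max_rq_count(plug) ||
	       last_bytes >= BLK_PLUG_FLUSH_SIZE;
}

int main(void)
{
	struct model_plug plug = { .rq_count = 32, .multiple_queues = false };

	printf("single queue, 32 queued: flush=%d\n",
	       should_flush(&plug, 4096));		/* hits the cap */
	plug.multiple_queues = true;
	printf("multiple queues, 32 queued: flush=%d\n",
	       should_flush(&plug, 4096));		/* cap doubled: keep plugging */
	printf("large last request: flush=%d\n",
	       should_flush(&plug, 256 * 1024));	/* size-triggered flush */
	return 0;
}
```
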