diff --git a/block/blk-mq.c b/block/blk-mq.c
index 84d7495..e9bf950 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -133,7 +133,8 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv,
 {
        struct mq_inflight *mi = priv;
 
-       if ((!mi->part->bd_partno || rq->part == mi->part) &&
+       if (rq->part && blk_do_io_stat(rq) &&
+           (!mi->part->bd_partno || rq->part == mi->part) &&
            blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                mi->inflight[rq_data_dir(rq)]++;
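The extra rq->part && blk_do_io_stat(rq) guard added above means that only requests actually tracked for I/O accounting bump the per-partition inflight counters (mi->inflight[] has one slot for reads and one for writes, indexed by rq_data_dir()). Restated as a standalone predicate purely for illustration — counts_for_part() is not a kernel helper:

static bool counts_for_part(struct request *rq, struct block_device *part)
{
	return rq->part && blk_do_io_stat(rq) &&		/* accounted at all */
	       (!part->bd_partno || rq->part == part) &&	/* whole disk, or this partition */
	       blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT;		/* still being served */
}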
 
@@ -1083,7 +1084,7 @@ bool blk_mq_complete_request_remote(struct request *rq)
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
        /*
-        * For a polled request, always complete locallly, it's pointless
+        * For a polled request, always complete locally, it's pointless
         * to redirect the completion.
         */
        if (rq->cmd_flags & REQ_POLLED)
@@ -1151,29 +1152,44 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
+/*
+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
+ * queues. This is important for md arrays to benefit from merging
+ * requests.
  */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+       if (plug->multiple_queues)
+               return BLK_MAX_REQUEST_COUNT * 2;
+       return BLK_MAX_REQUEST_COUNT;
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 {
-       struct completion *waiting = rq->end_io_data;
+       struct request *last = rq_list_peek(&plug->mq_list);
 
-       rq->end_io_data = (void *)(uintptr_t)error;
+       if (!plug->rq_count) {
+               trace_block_plug(rq->q);
+       } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+                  (!blk_queue_nomerges(rq->q) &&
+                   blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+               blk_mq_flush_plug_list(plug, false);
+               trace_block_plug(rq->q);
+       }
 
-       /*
-        * complete last, if this is a stack request the process (and thus
-        * the rq pointer) could be invalid right after this complete()
-        */
-       complete(waiting);
+       if (!plug->multiple_queues && last && last->q != rq->q)
+               plug->multiple_queues = true;
+       if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+               plug->has_elevator = true;
+       rq->rq_next = NULL;
+       rq_list_add(&plug->mq_list, rq);
+       plug->rq_count++;
 }
 
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:                request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:      I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1182,23 +1198,32 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
        WARN_ON(irqs_disabled());
        WARN_ON(!blk_rq_is_passthrough(rq));
 
-       rq->end_io = done;
-
        blk_account_io_start(rq);
-
-       /*
-        * don't check dying flag for MQ because the request won't
-        * be reused after dying flag is set
-        */
-       blk_mq_sched_insert_request(rq, at_head, true, false);
+       if (current->plug)
+               blk_add_rq_to_plug(current->plug, rq);
+       else
+               blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+       struct completion done;
+       blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+       struct blk_rq_wait *wait = rq->end_io_data;
+
+       wait->ret = ret;
+       complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
        if (!rq->mq_hctx)
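With the @done argument gone, a caller that needs an asynchronous completion handler now sets rq->end_io and rq->end_io_data on the request before submitting it, exactly as blk_execute_rq() does below with blk_end_sync_rq(). A hypothetical caller might look like this (my_ctx, my_end_io and my_submit are illustrative names, not part of this patch):

struct my_ctx {
	struct completion done;
	blk_status_t status;
};

/* Runs once the request completes. */
static void my_end_io(struct request *rq, blk_status_t status)
{
	struct my_ctx *ctx = rq->end_io_data;

	ctx->status = status;
	complete(&ctx->done);
}

static void my_submit(struct request *rq, struct my_ctx *ctx)
{
	init_completion(&ctx->done);
	rq->end_io_data = ctx;
	rq->end_io = my_end_io;		/* previously the @done argument */
	blk_execute_rq_nowait(rq, false);
	/* the submitter can later wait_for_completion(&ctx->done) */
}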
@@ -1230,25 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
-       unsigned long hang_check;
+       struct blk_rq_wait wait = {
+               .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+       };
+
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
 
        rq->end_io_data = &wait;
-       blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
+       rq->end_io = blk_end_sync_rq;
 
-       /* Prevent hang_check timer from firing at us during very long I/O */
-       hang_check = sysctl_hung_task_timeout_secs;
+       blk_account_io_start(rq);
+       blk_mq_sched_insert_request(rq, at_head, true, false);
 
-       if (blk_rq_is_poll(rq))
-               blk_rq_poll_completion(rq, &wait);
-       else if (hang_check)
-               while (!wait_for_completion_io_timeout(&wait,
-                               hang_check * (HZ/2)))
-                       ;
-       else
-               wait_for_completion_io(&wait);
+       if (blk_rq_is_poll(rq)) {
+               blk_rq_poll_completion(rq, &wait.done);
+       } else {
+               /*
+                * Prevent hang_check timer from firing at us during very long
+                * I/O
+                */
+               unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+               if (hang_check)
+                       while (!wait_for_completion_io_timeout(&wait.done,
+                                       hang_check * (HZ/2)))
+                               ;
+               else
+                       wait_for_completion_io(&wait.done);
+       }
 
-       return (blk_status_t)(uintptr_t)rq->end_io_data;
+       return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
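Nothing changes for callers of blk_execute_rq(): the signature and the blk_status_t return value stay the same. Only the internal plumbing does — the status is now carried in the on-stack struct blk_rq_wait instead of being cast in and out of rq->end_io_data, and the request is inserted directly rather than through blk_execute_rq_nowait(), which after this patch would queue it on the caller's plug if one is active. Typical use stays as before, e.g. (illustrative):

	blk_status_t status = blk_execute_rq(rq, false);

	if (status != BLK_STS_OK)
		return blk_status_to_errno(status);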
 
@@ -2123,8 +2160,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-       struct blk_mq_hw_ctx *hctx;
-
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        /*
         * If the IO scheduler does not respect hardware queues when
         * dispatching, we just don't bother with multiple HW queues and
@@ -2132,8 +2168,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
         * just causes lock contention inside the scheduler and pointless cache
         * bouncing.
         */
-       hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-                                    raw_smp_processor_id());
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
        if (!blk_mq_hctx_stopped(hctx))
                return hctx;
        return NULL;
@@ -2676,40 +2712,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-/*
- * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
- * queues. This is important for md arrays to benefit from merging
- * requests.
- */
-static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
-{
-       if (plug->multiple_queues)
-               return BLK_MAX_REQUEST_COUNT * 2;
-       return BLK_MAX_REQUEST_COUNT;
-}
-
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
-       struct request *last = rq_list_peek(&plug->mq_list);
-
-       if (!plug->rq_count) {
-               trace_block_plug(rq->q);
-       } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
-                  (!blk_queue_nomerges(rq->q) &&
-                   blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-               blk_mq_flush_plug_list(plug, false);
-               trace_block_plug(rq->q);
-       }
-
-       if (!plug->multiple_queues && last && last->q != rq->q)
-               plug->multiple_queues = true;
-       if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
-               plug->has_elevator = true;
-       rq->rq_next = NULL;
-       rq_list_add(&plug->mq_list, rq);
-       plug->rq_count++;
-}
-
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
                                     struct bio *bio, unsigned int nr_segs)
 {
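The two helpers removed here are not dropped: blk_plug_max_rq_count() and blk_add_rq_to_plug() reappear verbatim as added lines earlier in this diff, hoisted above blk_execute_rq_nowait() so that passthrough requests submitted under a plug can be batched as well. The payoff, sketched with a hypothetical batch submitter (my_submit_batch is a made-up name, not from this patch):

static void my_submit_batch(struct request **rqs, int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		blk_execute_rq_nowait(rqs[i], false);	/* queued on current->plug */
	blk_finish_plug(&plug);		/* flushes the batched requests */
}

Without an active plug, each call still falls back to blk_mq_sched_insert_request() individually, as before.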