diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b7..e9bf950 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -133,7 +133,8 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv,
 {
        struct mq_inflight *mi = priv;
 
-       if ((!mi->part->bd_partno || rq->part == mi->part) &&
+       if (rq->part && blk_do_io_stat(rq) &&
+           (!mi->part->bd_partno || rq->part == mi->part) &&
            blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                mi->inflight[rq_data_dir(rq)]++;
 
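The inflight counter now skips requests that were never accounted: a passthrough request can carry a NULL ->part, and a request with I/O statistics disabled should not inflate the inflight numbers either. Restated as a standalone predicate for readability; this is an illustrative sketch, not tree code, and blk_do_io_stat() is the in-tree RQF_IO_STAT accounting gate whose exact definition has shifted between releases:

        /* illustrative restatement of the new check above */
        static bool rq_counts_as_inflight(struct request *rq,
                                          struct block_device *part)
        {
                if (!rq->part || !blk_do_io_stat(rq))    /* never accounted */
                        return false;
                if (part->bd_partno && rq->part != part) /* other partition */
                        return false;
                return blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT;
        }
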
@@ -1151,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       rq->end_io_data = (void *)(uintptr_t)error;
-
-       /*
-        * complete last, if this is a stack request the process (and thus
-        * the rq pointer) could be invalid right after this complete()
-        */
-       complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
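The deleted blk_end_sync_rq() smuggled the completion status through rq->end_io_data, so the same pointer field served as completion handle on the way in and status carrier on the way out:

        /* the retired idiom, as seen on both ends of the old sync path */
        rq->end_io_data = (void *)(uintptr_t)error;          /* completion */
        ...
        return (blk_status_t)(uintptr_t)rq->end_io_data;     /* waiter */

The struct blk_rq_wait added further down keeps the completion and a typed blk_status_t side by side instead, so end_io_data points at one object for the request's entire lifetime and both casts disappear.
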
@@ -1203,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
        plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-               rq_end_io_fn *done, bool use_plug)
-{
-       WARN_ON(irqs_disabled());
-       WARN_ON(!blk_rq_is_passthrough(rq));
-
-       rq->end_io = done;
-
-       blk_account_io_start(rq);
-
-       if (use_plug && current->plug) {
-               blk_add_rq_to_plug(current->plug, rq);
-               return;
-       }
-       /*
-        * don't check dying flag for MQ because the request won't
-        * be reused after dying flag is set
-        */
-       blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:                request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:      I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1238,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-       __blk_execute_rq_nowait(rq, at_head, done, true);
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
 
+       blk_account_io_start(rq);
+       if (current->plug)
+               blk_add_rq_to_plug(current->plug, rq);
+       else
+               blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+       struct completion done;
+       blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+       struct blk_rq_wait *wait = rq->end_io_data;
+
+       wait->ret = ret;
+       complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
        if (!rq->mq_hctx)
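With the done argument gone, a caller that wants completion notification installs the hook on the request itself before submitting; blk_end_sync_rq() above is exactly such a hook. (The kernel-doc Note still says @done; the hook it refers to is now rq->end_io.) A hypothetical caller sketch, with my_end_io and my_ctx as illustrative names that are not part of this diff:

        rq->end_io = my_end_io;       /* rq_end_io_fn, runs at completion */
        rq->end_io_data = my_ctx;     /* handed back to my_end_io */
        blk_execute_rq_nowait(rq, false);    /* false: insert at tail */
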
@@ -1276,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
-       unsigned long hang_check;
+       struct blk_rq_wait wait = {
+               .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+       };
+
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
 
-       /*
-        * iopoll requires request to be submitted to driver, so can't
-        * use plug
-        */
        rq->end_io_data = &wait;
-       __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-                       !blk_rq_is_poll(rq));
-
-       /* Prevent hang_check timer from firing at us during very long I/O */
-       hang_check = sysctl_hung_task_timeout_secs;
-
-       if (blk_rq_is_poll(rq))
-               blk_rq_poll_completion(rq, &wait);
-       else if (hang_check)
-               while (!wait_for_completion_io_timeout(&wait,
-                               hang_check * (HZ/2)))
-                       ;
-       else
-               wait_for_completion_io(&wait);
+       rq->end_io = blk_end_sync_rq;
+
+       blk_account_io_start(rq);
+       blk_mq_sched_insert_request(rq, at_head, true, false);
+
+       if (blk_rq_is_poll(rq)) {
+               blk_rq_poll_completion(rq, &wait.done);
+       } else {
+               /*
+                * Prevent hang_check timer from firing at us during very long
+                * I/O
+                */
+               unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+               if (hang_check)
+                       while (!wait_for_completion_io_timeout(&wait.done,
+                                       hang_check * (HZ/2)))
+                               ;
+               else
+                       wait_for_completion_io(&wait.done);
+       }
 
-       return (blk_status_t)(uintptr_t)rq->end_io_data;
+       return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
 
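The relocated hang_check logic is unchanged in substance, but the arithmetic deserves a note: sysctl_hung_task_timeout_secs is in seconds and HZ is jiffies per second, so hang_check * (HZ/2) is half the hung-task window, and the waiter provably wakes and re-sleeps twice per detection period, keeping khungtaskd from flagging a long passthrough I/O:

        /*
         * Worked example, assuming HZ=250 and hung_task_timeout=120s:
         * hang_check * (HZ/2) = 120 * 125 = 15000 jiffies = 60s,
         * i.e. two wakeups per 120s hung-task window.
         */
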
@@ -2174,8 +2160,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-       struct blk_mq_hw_ctx *hctx;
-
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        /*
         * If the IO scheduler does not respect hardware queues when
         * dispatching, we just don't bother with multiple HW queues and
@@ -2183,8 +2168,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
         * just causes lock contention inside the scheduler and pointless cache
         * bouncing.
         */
-       hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-                                    raw_smp_processor_id());
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
        if (!blk_mq_hctx_stopped(hctx))
                return hctx;
        return NULL;
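
The single-queue hctx lookup now resolves through this CPU's software context rather than indexing the tag-set map with the raw CPU id. Side by side, as a sketch; with no command flags passed, blk_mq_map_queue() should land on the HCTX_TYPE_DEFAULT mapping cached on the ctx, though the helper internals vary between releases:

        /* old: walk q->tag_set's default map by raw CPU number */
        hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
                                     raw_smp_processor_id());

        /* new: flags == 0 carries no poll/read hint, so this still maps
         * to the default type, but via ctx instead of q->tag_set */
        ctx  = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, 0, ctx);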