blk-flush: reuse rq queuelist in flush state machine
[linux-2.6-microblaze.git] / block / blk-flush.c
index fedb390..e73dc22 100644 (file)
@@ -183,14 +183,13 @@ static void blk_flush_complete_seq(struct request *rq,
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
-               list_move_tail(&rq->flush.list, pending);
+               list_move_tail(&rq->queuelist, pending);
                break;
 
        case REQ_FSEQ_DATA:
-               list_del_init(&rq->flush.list);
                fq->flush_data_in_flight++;
                spin_lock(&q->requeue_lock);
-               list_add(&rq->queuelist, &q->requeue_list);
+               list_move(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;
@@ -202,7 +201,7 @@ static void blk_flush_complete_seq(struct request *rq,
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
-               list_del_init(&rq->flush.list);
+               list_del_init(&rq->queuelist);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;
@@ -258,7 +257,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
        fq->flush_running_idx ^= 1;
 
        /* and push the waiting requests to the next stage */
-       list_for_each_entry_safe(rq, n, running, flush.list) {
+       list_for_each_entry_safe(rq, n, running, queuelist) {
                unsigned int seq = blk_flush_cur_seq(rq);
 
                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
@@ -292,7 +291,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 {
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
-               list_first_entry(pending, struct request, flush.list);
+               list_first_entry(pending, struct request, queuelist);
        struct request *flush_rq = fq->flush_rq;
 
        /* C1 described at the top of this file */
@@ -376,6 +375,11 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        fq->flush_data_in_flight--;
+       /*
+        * rq->queuelist may have been corrupted by rq->rq_next reuse;
+        * re-initialize it here before handing the request back to the
+        * flush state machine.
+        */
+       INIT_LIST_HEAD(&rq->queuelist);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
@@ -386,7 +390,6 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 static void blk_rq_init_flush(struct request *rq)
 {
        rq->flush.seq = 0;
-       INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        rq->end_io = mq_flush_data_end_io;