block: add a blk_mq_init_queue_data helper
[linux-2.6-microblaze.git] / block / bfq-iosched.c
index 4686b68..78ba57e 100644 (file)
@@ -613,6 +613,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
                bfqq->pos_root = NULL;
        }
 
+       /* oom_bfqq does not participate in queue merging */
+       if (bfqq == &bfqd->oom_bfqq)
+               return;
+
        /*
         * bfqq cannot be merged any longer (see comments in
         * bfq_setup_cooperator): no point in adding bfqq into the
@@ -1055,7 +1059,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
 
 static int bfqq_process_refs(struct bfq_queue *bfqq)
 {
-       return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+       return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
                (bfqq->weight_counter != NULL);
 }
 
@@ -2712,8 +2716,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
        }
 }
 
-
-static
 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        /*
@@ -3443,6 +3445,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
                                                 struct bfq_queue *bfqq)
 {
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        return (bfqq->wr_coeff > 1 &&
                (bfqd->wr_busy_queues <
                 bfq_tot_busy_queues(bfqd) ||
@@ -4076,6 +4082,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
                bfqq_sequential_and_IO_bound,
                idling_boosts_thr;
 
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
                bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
 
@@ -4169,6 +4179,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
        struct bfq_data *bfqd = bfqq->bfqd;
        bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
 
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        if (unlikely(bfqd->strict_guarantees))
                return true;
 
@@ -4809,9 +4823,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 {
        struct bfq_queue *item;
        struct hlist_node *n;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        struct bfq_group *bfqg = bfqq_group(bfqq);
-#endif
 
        if (bfqq->bfqd)
                bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
@@ -4884,9 +4896,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
                bfqq->bfqd->last_completed_rq_bfqq = NULL;
 
        kmem_cache_free(bfq_pool, bfqq);
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        bfqg_and_blkg_put(bfqg);
-#endif
 }
 
 static void bfq_put_cooperator(struct bfq_queue *bfqq)
@@ -5967,6 +5977,8 @@ static void bfq_finish_requeue_request(struct request *rq)
 }
 
 /*
+ * Removes the association between the current task and bfqq, assuming
+ * that bic points to the bfq iocontext of the task.
  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  * was the last process referring to that bfqq.
  */
@@ -6201,20 +6213,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
        return bfqq;
 }
 
-static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+static void
+bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
-       struct bfq_data *bfqd = bfqq->bfqd;
        enum bfqq_expiration reason;
        unsigned long flags;
 
        spin_lock_irqsave(&bfqd->lock, flags);
-       bfq_clear_bfqq_wait_request(bfqq);
 
+       /*
+        * Since bfqq may be involved in a race, we should first check
+        * whether bfqq is in service before doing anything to it. If
+        * the bfqq in the race is not in service, it has already been
+        * expired through __bfq_bfqq_expire(), and its wait_request
+        * flag has been cleared in __bfq_bfqd_reset_in_service().
+        */
        if (bfqq != bfqd->in_service_queue) {
                spin_unlock_irqrestore(&bfqd->lock, flags);
                return;
        }
 
+       bfq_clear_bfqq_wait_request(bfqq);
+
        if (bfq_bfqq_budget_timeout(bfqq))
                /*
                 * Also here the queue can be safely expired
@@ -6259,7 +6279,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
         * early.
         */
        if (bfqq)
-               bfq_idle_slice_timer_body(bfqq);
+               bfq_idle_slice_timer_body(bfqd, bfqq);
 
        return HRTIMER_NORESTART;
 }
@@ -6374,10 +6394,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
 
        hrtimer_cancel(&bfqd->idle_slice_timer);
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        /* release oom-queue reference to root group */
        bfqg_and_blkg_put(bfqd->root_group);
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
        blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
 #else
        spin_lock_irq(&bfqd->lock);