Merge tag 'char-misc-5.19-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregk...
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e9bf950..93d9d60 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               goto out_queue_exit;
        data.ctx = __blk_mq_get_ctx(q, cpu);
 
        if (!q->elevator)
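
The check added above covers the case where none of the CPUs mapped to this hardware queue are online: cpumask_first_and() then returns a value of at least nr_cpu_ids, and using that to look up a per-CPU software context would index past the end of the map, so the allocation now bails out through out_queue_exit instead. Below is a minimal userspace sketch of the same sentinel pattern; the names and masks are illustrative only, not kernel API.

/*
 * Illustrative userspace sketch (not kernel code): a first-and style
 * scan returns NBITS when the two masks share no set bit, mirroring
 * how cpumask_first_and() returns a value >= nr_cpu_ids for an empty
 * intersection, and the caller treats that as "no usable CPU".
 */
#include <stdio.h>

#define NBITS 8                         /* stand-in for nr_cpu_ids */

/* Return the first bit set in both masks, or NBITS if there is none. */
static unsigned int first_and(unsigned long a, unsigned long b)
{
        for (unsigned int i = 0; i < NBITS; i++)
                if ((a & b) & (1UL << i))
                        return i;
        return NBITS;
}

int main(void)
{
        unsigned long hctx_mask = 0x30; /* CPUs 4 and 5 mapped to the hctx */
        unsigned long online    = 0x0f; /* only CPUs 0-3 are online */
        unsigned int cpu = first_and(hctx_mask, online);

        if (cpu >= NBITS) {             /* the check the hunk above adds */
                fprintf(stderr, "no online CPU mapped, bailing out\n");
                return 1;
        }
        printf("dispatching via CPU %u\n", cpu);
        return 0;
}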
@@ -2140,20 +2142,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
-/*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e && e->type->ops.dispatch_request &&
-           !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-               return true;
-       return false;
-}
-
 /*
  * Return preferred queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
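
The helper removed above re-derived "scheduler does not respect hardware queues" from the elevator ops and feature flags on every call; the call sites below switch to blk_queue_sq_sched(q), which reads a cached queue flag instead. A simplified userspace sketch of caching such a property as a flag set once when the scheduler is attached, with illustrative types rather than the real struct request_queue:

/*
 * Simplified userspace sketch: cache the "elevator is not MQ-aware"
 * property as a queue flag when the scheduler is attached, so the hot
 * path only tests a bit.  Types and names are illustrative only.
 */
#include <stdbool.h>

#define QUEUE_FLAG_SQ_SCHED     (1U << 0)

struct queue {
        unsigned int flags;
};

struct sched_type {
        bool mq_aware;
        void (*dispatch_request)(struct queue *q);
};

static void noop_dispatch(struct queue *q)
{
        (void)q;
}

/* Evaluate the property once, when a scheduler is attached. */
static void attach_sched(struct queue *q, const struct sched_type *e)
{
        if (e && e->dispatch_request && !e->mq_aware)
                q->flags |= QUEUE_FLAG_SQ_SCHED;
        else
                q->flags &= ~QUEUE_FLAG_SQ_SCHED;
}

/* Cheap per-dispatch test, analogous in spirit to blk_queue_sq_sched(). */
static bool queue_sq_sched(const struct queue *q)
{
        return q->flags & QUEUE_FLAG_SQ_SCHED;
}

int main(void)
{
        struct queue q = { 0 };
        const struct sched_type single_queue_sched = {
                .mq_aware = false,
                .dispatch_request = noop_dispatch,
        };

        attach_sched(&q, &single_queue_sched);
        return queue_sq_sched(&q) ? 0 : 1;
}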
@@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
@@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
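
Both run paths apply the same gating once sq_hctx is known: with a non-mq-aware scheduler only the preferred hardware queue is run and the others are skipped. A compact userspace sketch of that loop shape, using illustrative names in place of the kernel structures:

/*
 * Userspace sketch of the gating both call sites apply: when a
 * single-queue constraint is in effect, only the preferred context
 * runs and the rest are skipped.  All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_HCTX 4

struct hw_ctx {
        int id;
        bool stopped;
};

static void run_one(struct hw_ctx *h)
{
        printf("running hw queue %d\n", h->id);
}

static void run_all(struct hw_ctx *hctxs, struct hw_ctx *sq_hctx)
{
        for (int i = 0; i < NR_HCTX; i++) {
                struct hw_ctx *h = &hctxs[i];

                if (h->stopped)
                        continue;
                /* Non-mq-aware scheduler: only the chosen context runs. */
                if (sq_hctx && sq_hctx != h)
                        continue;
                run_one(h);
        }
}

int main(void)
{
        struct hw_ctx hctxs[NR_HCTX] = {
                { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
        };

        run_all(hctxs, &hctxs[2]);      /* sq scheduler: only queue 2 runs */
        run_all(hctxs, NULL);           /* mq-aware: every queue runs */
        return 0;
}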
@@ -2777,15 +2765,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                return NULL;
        }
 
-       rq_qos_throttle(q, *bio);
-
        if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
                return NULL;
        if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                return NULL;
 
-       rq->cmd_flags = (*bio)->bi_opf;
+       /*
+        * If any qos ->throttle() ends up blocking, we will have flushed the
+        * plug and hence killed the cached_rq list as well. Pop this entry
+        * before we throttle.
+        */
        plug->cached_rq = rq_list_next(rq);
+       rq_qos_throttle(q, *bio);
+
+       rq->cmd_flags = (*bio)->bi_opf;
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
 }
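
The relocated comment spells out the ordering: a qos ->throttle() hook may block, and blocking flushes the plug, which tears down the cached request list, so the cached entry has to be popped before the throttle call. A small userspace sketch of the same pop-before-potentially-blocking-call pattern, with illustrative names and a simplified list:

/*
 * Userspace sketch of the ordering fixed above: pop the cached node
 * off the list *before* calling a routine that may block and, while
 * blocked, flush the list (freeing everything still on it).
 * All names are illustrative.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct req {
        struct req *next;
        int tag;
};

static struct req *cached;              /* stand-in for plug->cached_rq */

/* May "block"; when it does, the whole cached list is flushed and freed. */
static void maybe_throttle(bool blocks)
{
        if (!blocks)
                return;
        while (cached) {
                struct req *r = cached;

                cached = r->next;
                free(r);
        }
}

static struct req *get_cached_request(bool throttle_blocks)
{
        struct req *rq = cached;

        if (!rq)
                return NULL;
        /* Pop first, so a flush inside maybe_throttle() cannot free rq. */
        cached = rq->next;
        maybe_throttle(throttle_blocks);
        return rq;
}

int main(void)
{
        struct req *rq = calloc(1, sizeof(*rq));

        rq->tag = 1;
        cached = rq;

        rq = get_cached_request(true);
        printf("got cached request with tag %d\n", rq->tag);
        free(rq);
        return 0;
}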
@@ -3443,8 +3436,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (blk_mq_hw_queue_mapped(hctx))
                blk_mq_tag_idle(hctx);
 
-       blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-                       set->queue_depth, flush_rq);
+       if (blk_queue_init_done(q))
+               blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+                               set->queue_depth, flush_rq);
        if (set->ops->exit_request)
                set->ops->exit_request(set, flush_rq, hctx_idx);
 
@@ -4438,12 +4432,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        if (!qe)
                return false;
 
+       /* q->elevator needs to be protected by ->sysfs_lock */
+       mutex_lock(&q->sysfs_lock);
+
        INIT_LIST_HEAD(&qe->node);
        qe->q = q;
        qe->type = q->elevator->type;
        list_add(&qe->node, head);
 
-       mutex_lock(&q->sysfs_lock);
        /*
         * After elevator_switch_mq, the previous elevator_queue will be
         * released by elevator_release. The reference of the io scheduler