blk-mq: diff of block/blk-mq.c — dispatch resource handling helper, commit_rqs guard, queue allocation/init updates (NOTE: the original page title referenced an unrelated x86/unwind/orc commit; corrected to match the diff content below)
[linux-2.6-microblaze.git] / block / blk-mq.c
index d92088d..a7785df 100644
@@ -1178,6 +1178,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
 
 #define BLK_MQ_RESOURCE_DELAY  3               /* ms units */
 
+static void blk_mq_handle_dev_resource(struct request *rq,
+                                      struct list_head *list)
+{
+       struct request *next =
+               list_first_entry_or_null(list, struct request, queuelist);
+
+       /*
+        * If an I/O scheduler has been configured and we got a driver tag for
+        * the next request already, free it.
+        */
+       if (next)
+               blk_mq_put_driver_tag(next);
+
+       list_add(&rq->queuelist, list);
+       __blk_mq_requeue_request(rq);
+}
+
 /*
  * Returns true if we did some work AND can potentially do more.
  */
@@ -1205,8 +1222,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                rq = list_first_entry(list, struct request, queuelist);
 
                hctx = rq->mq_hctx;
-               if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+               if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+                       blk_mq_put_driver_tag(rq);
                        break;
+               }
 
                if (!blk_mq_get_driver_tag(rq)) {
                        /*
@@ -1245,17 +1264,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
-                       /*
-                        * If an I/O scheduler has been configured and we got a
-                        * driver tag for the next request already, free it
-                        * again.
-                        */
-                       if (!list_empty(list)) {
-                               nxt = list_first_entry(list, struct request, queuelist);
-                               blk_mq_put_driver_tag(nxt);
-                       }
-                       list_add(&rq->queuelist, list);
-                       __blk_mq_requeue_request(rq);
+                       blk_mq_handle_dev_resource(rq, list);
                        break;
                }
 
@@ -1282,7 +1291,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                 * the driver there was more coming, but that turned out to
                 * be a lie.
                 */
-               if (q->mq_ops->commit_rqs)
+               if (q->mq_ops->commit_rqs && queued)
                        q->mq_ops->commit_rqs(hctx);
 
                spin_lock(&hctx->lock);
@@ -1904,6 +1913,8 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
+       int queued = 0;
+
        while (!list_empty(list)) {
                blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
@@ -1919,7 +1930,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                break;
                        }
                        blk_mq_end_request(rq, ret);
-               }
+               } else
+                       queued++;
        }
 
        /*
@@ -1927,7 +1939,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
         * the driver there was more coming, but that turned out to
         * be a lie.
         */
-       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
+       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
@@ -2409,8 +2421,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
        init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
        INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
 
-       hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
-                       gfp);
+       hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
        if (!hctx->fq)
                goto free_bitmap;
 
@@ -2718,13 +2729,15 @@ void blk_mq_release(struct request_queue *q)
        blk_mq_sysfs_deinit(q);
 }
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+               void *queuedata)
 {
        struct request_queue *uninit_q, *q;
 
-       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+       uninit_q = __blk_alloc_queue(set->numa_node);
        if (!uninit_q)
                return ERR_PTR(-ENOMEM);
+       uninit_q->queuedata = queuedata;
 
        /*
         * Initialize the queue without an elevator. device_add_disk() will do
@@ -2736,6 +2749,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        return q;
 }
+EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+       return blk_mq_init_queue_data(set, NULL);
+}
 EXPORT_SYMBOL(blk_mq_init_queue);
 
 /*
@@ -2824,7 +2843,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                        memcpy(new_hctxs, hctxs, q->nr_hw_queues *
                               sizeof(*hctxs));
                q->queue_hw_ctx = new_hctxs;
-               q->nr_hw_queues = set->nr_hw_queues;
                kfree(hctxs);
                hctxs = new_hctxs;
        }
@@ -2926,11 +2944,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);
 
-       blk_queue_make_request(q, blk_mq_make_request);
-
-       /*
-        * Do this after blk_queue_make_request() overrides it...
-        */
+       q->make_request_fn = blk_mq_make_request;
        q->nr_requests = set->queue_depth;
 
        /*
@@ -3023,6 +3037,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 
 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
+       /*
+        * blk_mq_map_queues() and multiple .map_queues() implementations
+        * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
+        * number of hardware queues.
+        */
+       if (set->nr_maps == 1)
+               set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
+
        if (set->ops->map_queues && !is_kdump_kernel()) {
                int i;