diff --git a/block/blk-core.c b/block/blk-core.c
index 5487912..3866b6c 100644
@@ -59,13 +59,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
 
-DEFINE_IDA(blk_queue_ida);
+static DEFINE_IDA(blk_queue_ida);
 
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep;
-struct kmem_cache *blk_requestq_srcu_cachep;
+static struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
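The IDA made static here is the allocator behind q->id later in this patch. For orientation, a minimal sketch of the ida_alloc()/ida_free() pattern it implements (my_dev and its helpers are hypothetical, not part of this patch):

#include <linux/idr.h>

static DEFINE_IDA(my_ida);              /* file-private, as blk_queue_ida is now */

struct my_dev {
        int id;
};

static int my_dev_assign_id(struct my_dev *dev)
{
        int id = ida_alloc(&my_ida, GFP_KERNEL);   /* smallest free ID */

        if (id < 0)
                return id;                          /* negative errno */
        dev->id = id;
        return 0;
}

static void my_dev_release_id(struct my_dev *dev)
{
        ida_free(&my_ida, dev->id);                 /* ID becomes reusable */
}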
@@ -253,19 +252,44 @@ void blk_clear_pm_only(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+{
+       kmem_cache_free(blk_requestq_cachep,
+                       container_of(rcu_head, struct request_queue, rcu_head));
+}
+
+static void blk_free_queue(struct request_queue *q)
+{
+       percpu_ref_exit(&q->q_usage_counter);
+
+       if (q->poll_stat)
+               blk_stat_remove_callback(q, q->poll_cb);
+       blk_stat_free_callback(q->poll_cb);
+
+       blk_free_queue_stats(q->stats);
+       kfree(q->poll_stat);
+
+       if (queue_is_mq(q))
+               blk_mq_release(q);
+
+       ida_free(&blk_queue_ida, q->id);
+       call_rcu(&q->rcu_head, blk_free_queue_rcu);
+}
+
 /**
  * blk_put_queue - decrement the request_queue refcount
  * @q: the request_queue structure to decrement the refcount for
  *
- * Decrements the refcount of the request_queue kobject. When this reaches 0
- * we'll have blk_release_queue() called.
+ * Decrements the refcount of the request_queue and frees it when the
+ * refcount reaches 0.
  *
- * Context: Any context, but the last reference must not be dropped from
- *          atomic context.
+ * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-       kobject_put(&q->kobj);
+       might_sleep();
+       if (refcount_dec_and_test(&q->refs))
+               blk_free_queue(q);
 }
 EXPORT_SYMBOL(blk_put_queue);
 
@@ -373,26 +397,20 @@ static void blk_timeout_work(struct work_struct *work)
 {
 }
 
-struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
+struct request_queue *blk_alloc_queue(int node_id)
 {
        struct request_queue *q;
 
-       q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
-                       GFP_KERNEL | __GFP_ZERO, node_id);
+       q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
+                                 node_id);
        if (!q)
                return NULL;
 
-       if (alloc_srcu) {
-               blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
-               if (init_srcu_struct(q->srcu) != 0)
-                       goto fail_q;
-       }
-
        q->last_merge = NULL;
 
        q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
        if (q->id < 0)
-               goto fail_srcu;
+               goto fail_q;
 
        q->stats = blk_alloc_queue_stats();
        if (!q->stats)
@@ -406,8 +424,7 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
        INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);
 
-       kobject_init(&q->kobj, &blk_queue_ktype);
-
+       refcount_set(&q->refs, 1);
        mutex_init(&q->debugfs_mutex);
        mutex_init(&q->sysfs_lock);
        mutex_init(&q->sysfs_dir_lock);
@@ -434,11 +451,8 @@ fail_stats:
        blk_free_queue_stats(q->stats);
 fail_id:
        ida_free(&blk_queue_ida, q->id);
-fail_srcu:
-       if (alloc_srcu)
-               cleanup_srcu_struct(q->srcu);
 fail_q:
-       kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
+       kmem_cache_free(blk_requestq_cachep, q);
        return NULL;
 }
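With the SRCU branch gone, the error path collapses to the kernel's usual stacked-goto unwind, where each fail_* label releases exactly what was acquired before the failing step, in reverse order. A minimal illustration of the idiom with hypothetical resources (res_a/res_b and the alloc_a/free_a helpers are placeholders, not real block-layer calls):

struct ctx {
        struct res_a *a;        /* hypothetical resources */
        struct res_b *b;
};

struct ctx *ctx_alloc(void)
{
        struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;
        c->a = alloc_a();
        if (!c->a)
                goto fail_c;
        c->b = alloc_b();
        if (!c->b)
                goto fail_a;    /* unwind everything acquired so far */
        return c;

fail_a:
        free_a(c->a);
fail_c:
        kfree(c);
        return NULL;
}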
 
@@ -454,7 +468,7 @@ bool blk_get_queue(struct request_queue *q)
 {
        if (unlikely(blk_queue_dying(q)))
                return false;
-       kobject_get(&q->kobj);
+       refcount_inc(&q->refs);
        return true;
 }
 EXPORT_SYMBOL(blk_get_queue);
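Caller-visible semantics stay the same: every successful blk_get_queue() must be paired with a blk_put_queue(), and the put may now sleep because the final reference frees the queue directly instead of going through a kobject release. A sketch of the expected pairing in a hypothetical caller:

static int my_inspect_queue(struct request_queue *q)
{
        if (!blk_get_queue(q))          /* refused once the queue is dying */
                return -ENODEV;

        /* ... safely dereference q while holding the reference ... */

        blk_put_queue(q);               /* may free q; not for atomic context */
        return 0;
}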
@@ -944,18 +958,6 @@ unsigned long bdev_start_io_acct(struct block_device *bdev,
 }
 EXPORT_SYMBOL(bdev_start_io_acct);
 
-/**
- * bio_start_io_acct_time - start I/O accounting for bio based drivers
- * @bio:       bio to start account for
- * @start_time:        start time that should be passed back to bio_end_io_acct().
- */
-void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
-{
-       bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
-                          bio_op(bio), start_time);
-}
-EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
-
 /**
  * bio_start_io_acct - start I/O accounting for bio based drivers
  * @bio:       bio to start accounting for
@@ -1183,9 +1185,6 @@ int __init blk_dev_init(void)
                        sizeof_field(struct request, cmd_flags));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
                        sizeof_field(struct bio, bi_opf));
-       BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
-                          __alignof__(struct request_queue)) !=
-                    sizeof(struct request_queue));
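The dropped assertion existed to pin srcu as the trailing flexible member of struct request_queue, since the SRCU-enabled cache allocated the srcu_struct in tail space behind the queue; with that cache gone there is no layout left to defend. For reference, a compile-time check of the same shape on a hypothetical struct:

#include <linux/align.h>
#include <linux/build_bug.h>

struct obj {
        int x;
        long tail[];            /* must remain the last member */
};

static int __init obj_init(void)
{
        /* Fails to compile if 'tail' ever stops being the final member. */
        BUILD_BUG_ON(ALIGN(offsetof(struct obj, tail),
                           __alignof__(struct obj)) != sizeof(struct obj));
        return 0;
}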
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
@@ -1196,10 +1195,6 @@ int __init blk_dev_init(void)
        blk_requestq_cachep = kmem_cache_create("request_queue",
                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
-       blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
-                       sizeof(struct request_queue) +
-                       sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
-
        blk_debugfs_root = debugfs_create_dir("block", NULL);
 
        return 0;