dm: cleanup open_table_device
author		Christoph Hellwig <hch@lst.de>
Tue, 15 Nov 2022 14:10:47 +0000 (22:10 +0800)
committer	Jens Axboe <axboe@kernel.dk>
Wed, 16 Nov 2022 22:19:56 +0000 (15:19 -0700)
Move all the logic for allocating the table_device and linking it into
the list into open_table_device().  This keeps the code tidy and
ensures that table_devices only exist in a fully initialized state.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Mike Snitzer <snitzer@kernel.org>
Link: https://lore.kernel.org/r/20221115141054.1051801-4-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
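
The reworked open_table_device() follows the common kernel constructor idiom:
either return a fully initialized, already-linked object, or return the errno
encoded in the pointer itself via ERR_PTR(), unwinding any partial work through
goto labels. Below is a minimal, generic sketch of that idiom for readers less
familiar with it; the names (struct foo, foo_create(), foo_get(),
acquire_resource()) are made up for the illustration and are not the
device-mapper code.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	void *resource;
};

/* Stand-in for a real acquisition step such as blkdev_get_by_dev(). */
extern void *acquire_resource(void);

/* Constructor: returns a fully set up object or an ERR_PTR()-encoded errno. */
static struct foo *foo_create(struct list_head *all_foos)
{
	struct foo *foo;
	int r;

	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->resource = acquire_resource();
	if (!foo->resource) {
		r = -EIO;
		goto out_free_foo;
	}

	/* Only publish the object once it is fully initialized. */
	list_add(&foo->list, all_foos);
	return foo;

out_free_foo:
	kfree(foo);
	return ERR_PTR(r);
}

/* Caller side: IS_ERR()/PTR_ERR() turn the encoded pointer back into an errno. */
static int foo_get(struct list_head *all_foos, struct foo **result)
{
	struct foo *foo = foo_create(all_foos);

	if (IS_ERR(foo))
		return PTR_ERR(foo);

	*result = foo;
	return 0;
}
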
drivers/md/dm.c

index 19d25bf..28d7581 100644
@@ -732,28 +732,41 @@ static char *_dm_claim_ptr = "I belong to device-mapper";
 /*
  * Open a table device so we can use it as a map destination.
  */
-static int open_table_device(struct table_device *td, dev_t dev,
-                            struct mapped_device *md)
+static struct table_device *open_table_device(struct mapped_device *md,
+               dev_t dev, fmode_t mode)
 {
+       struct table_device *td;
        struct block_device *bdev;
        u64 part_off;
        int r;
 
-       BUG_ON(td->dm_dev.bdev);
+       td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
+       if (!td)
+               return ERR_PTR(-ENOMEM);
+       refcount_set(&td->count, 1);
 
-       bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
-       if (IS_ERR(bdev))
-               return PTR_ERR(bdev);
+       bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
+       if (IS_ERR(bdev)) {
+               r = PTR_ERR(bdev);
+               goto out_free_td;
+       }
 
        r = bd_link_disk_holder(bdev, dm_disk(md));
-       if (r) {
-               blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
-               return r;
-       }
+       if (r)
+               goto out_blkdev_put;
 
+       td->dm_dev.mode = mode;
        td->dm_dev.bdev = bdev;
        td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
-       return 0;
+       format_dev_t(td->dm_dev.name, dev);
+       list_add(&td->list, &md->table_devices);
+       return td;
+
+out_blkdev_put:
+       blkdev_put(bdev, mode | FMODE_EXCL);
+out_free_td:
+       kfree(td);
+       return ERR_PTR(r);
 }
 
 /*
@@ -786,31 +799,16 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
                        struct dm_dev **result)
 {
-       int r;
        struct table_device *td;
 
        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
-               td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
-               if (!td) {
-                       mutex_unlock(&md->table_devices_lock);
-                       return -ENOMEM;
-               }
-
-               td->dm_dev.mode = mode;
-               td->dm_dev.bdev = NULL;
-
-               if ((r = open_table_device(td, dev, md))) {
+               td = open_table_device(md, dev, mode);
+               if (IS_ERR(td)) {
                        mutex_unlock(&md->table_devices_lock);
-                       kfree(td);
-                       return r;
+                       return PTR_ERR(td);
                }
-
-               format_dev_t(td->dm_dev.name, dev);
-
-               refcount_set(&td->count, 1);
-               list_add(&td->list, &md->table_devices);
        } else {
                refcount_inc(&td->count);
        }
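
Taken together, the two hunks leave dm_get_table_device() as a thin
lookup-or-construct wrapper around open_table_device(). A sketch of the
resulting flow follows; the tail of the function (the final unlock and handing
&td->dm_dev back through *result) lies outside the hunk above and is assumed
here from context rather than shown by the patch.

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		/*
		 * Not open yet: open_table_device() allocates the table_device,
		 * opens the block device, links it into md->table_devices and
		 * returns it with a reference count of 1, or an ERR_PTR() on
		 * failure.
		 */
		td = open_table_device(md, dev, mode);
		if (IS_ERR(td)) {
			mutex_unlock(&md->table_devices_lock);
			return PTR_ERR(td);
		}
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	/* Assumed unchanged tail: hand the embedded dm_dev to the caller. */
	*result = &td->dm_dev;
	return 0;
}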