md: add a mddev_is_dm helper
author Christoph Hellwig <hch@lst.de>
Sun, 3 Mar 2024 14:01:42 +0000 (07:01 -0700)
committer Song Liu <song@kernel.org>
Wed, 6 Mar 2024 16:59:53 +0000 (08:59 -0800)
Add a helper to check for a DM-mapped MD device instead of using
the obfuscated ->gendisk or ->queue NULL checks.
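
For illustration, a minimal sketch of the helper and the call-site
conversion it enables (taken from the md.h and md_integrity_add_rdev()
hunks in this patch):

    static inline bool mddev_is_dm(struct mddev *mddev)
    {
            return !mddev->gendisk;
    }

    /* before: readers must know that DM-mapped arrays have no gendisk */
    if (!mddev->gendisk)
            return 0;

    /* after: the intent is explicit */
    if (mddev_is_dm(mddev))
            return 0;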

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Song Liu <song@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-4-hch@lst.de
drivers/md/md.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

diff --git a/drivers/md/md.c b/drivers/md/md.c
index ab23153..2d93ea1 100644
@@ -2410,7 +2410,7 @@ int md_integrity_register(struct mddev *mddev)
 
        if (list_empty(&mddev->disks))
                return 0; /* nothing to do */
-       if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+       if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
                return 0; /* shouldn't register, or already is */
        rdev_for_each(rdev, mddev) {
                /* skip spares and non-functional disks */
@@ -2463,7 +2463,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
        struct blk_integrity *bi_mddev;
 
-       if (!mddev->gendisk)
+       if (mddev_is_dm(mddev))
                return 0;
 
        bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -5977,7 +5977,7 @@ int md_run(struct mddev *mddev)
                invalidate_bdev(rdev->bdev);
                if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
                        mddev->ro = MD_RDONLY;
-                       if (mddev->gendisk)
+                       if (!mddev_is_dm(mddev))
                                set_disk_ro(mddev->gendisk, 1);
                }
 
@@ -6139,7 +6139,7 @@ int md_run(struct mddev *mddev)
                }
        }
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                bool nonrot = true;
 
                rdev_for_each(rdev, mddev) {
@@ -6404,7 +6404,7 @@ static void mddev_detach(struct mddev *mddev)
                mddev->pers->quiesce(mddev, 0);
        }
        md_unregister_thread(mddev, &mddev->thread);
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev))
                blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 }
 
@@ -7360,10 +7360,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
        if (!rv) {
                if (mddev_is_clustered(mddev))
                        md_cluster_ops->update_size(mddev, old_dev_sectors);
-               else if (mddev->queue) {
+               else if (!mddev_is_dm(mddev))
                        set_capacity_and_notify(mddev->gendisk,
                                                mddev->array_sectors);
-               }
        }
        return rv;
 }
@@ -9177,7 +9176,7 @@ void md_do_sync(struct md_thread *thread)
                        mddev->delta_disks > 0 &&
                        mddev->pers->finish_reshape &&
                        mddev->pers->size &&
-                       mddev->queue) {
+                       !mddev_is_dm(mddev)) {
                mddev_lock_nointr(mddev);
                md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
                mddev_unlock(mddev);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e2e1ddd..d5e9965 100644
@@ -911,16 +911,24 @@ int do_md_run(struct mddev *mddev);
 
 extern const struct block_device_operations md_fops;
 
+/*
+ * MD devices can be used underneath DM, in which case ->gendisk is NULL.
+ */
+static inline bool mddev_is_dm(struct mddev *mddev)
+{
+       return !mddev->gendisk;
+}
+
 static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
                sector_t sector)
 {
-       if (mddev->gendisk)
+       if (!mddev_is_dm(mddev))
                trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
 }
 
 #define mddev_add_trace_msg(mddev, fmt, args...)                       \
 do {                                                                   \
-       if ((mddev)->gendisk)                                           \
+       if (!mddev_is_dm(mddev))                                        \
                blk_add_trace_msg((mddev)->queue, fmt, ##args);         \
 } while (0)
 
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index aff094d..9f787ae 100644
@@ -399,7 +399,7 @@ static int raid0_run(struct mddev *mddev)
                mddev->private = conf;
        }
        conf = mddev->private;
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                struct md_rdev *rdev;
 
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 05870a4..dd1393d 100644
@@ -1926,7 +1926,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        for (mirror = first; mirror <= last; mirror++) {
                p = conf->mirrors + mirror;
                if (!p->rdev) {
-                       if (mddev->gendisk)
+                       if (!mddev_is_dm(mddev))
                                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                                  rdev->data_offset << 9);
 
@@ -3227,14 +3227,11 @@ static int raid1_run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev)) {
                blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
-       rdev_for_each(rdev, mddev) {
-               if (!mddev->gendisk)
-                       continue;
-               disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                 rdev->data_offset << 9);
+               rdev_for_each(rdev, mddev)
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
        }
 
        mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1447cb1..4021cf0 100644
@@ -2106,7 +2106,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        continue;
                }
 
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
 
@@ -2126,7 +2126,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                set_bit(Replacement, &rdev->flags);
                rdev->raid_disk = repl_slot;
                err = 0;
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
                conf->fullsync = 1;
@@ -4014,7 +4014,7 @@ static int raid10_run(struct mddev *mddev)
                }
        }
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
                blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
                raid10_set_io_opt(conf);
@@ -4048,7 +4048,7 @@ static int raid10_run(struct mddev *mddev)
                if (first || diff < min_offset_diff)
                        min_offset_diff = diff;
 
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
 
@@ -4933,7 +4933,7 @@ static void end_reshape(struct r10conf *conf)
        conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
-       if (conf->mddev->queue)
+       if (!mddev_is_dm(conf->mddev))
                raid10_set_io_opt(conf);
        conf->fullsync = 0;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c082b07..f8a8117 100644
@@ -2416,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num)
        size_t namelen = sizeof(conf->cache_name[0]);
        int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
-       if (conf->mddev->gendisk)
+       if (mddev_is_dm(conf->mddev))
                snprintf(conf->cache_name[0], namelen,
-                       "raid%d-%s", conf->level, mdname(conf->mddev));
+                       "raid%d-%p", conf->level, conf->mddev);
        else
                snprintf(conf->cache_name[0], namelen,
-                       "raid%d-%p", conf->level, conf->mddev);
+                       "raid%d-%s", conf->level, mdname(conf->mddev));
        snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
        conf->active_name = 0;
@@ -4274,11 +4274,10 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                                        set_bit(STRIPE_DELAYED, &sh->state);
                        }
                }
-               if (rcw && conf->mddev->queue)
-                       mddev_add_trace_msg(conf->mddev,
-                               "raid5 rcw %llu %d %d %d",
-                               sh->sector, rcw, qread,
-                               test_bit(STRIPE_DELAYED, &sh->state));
+               if (rcw && !mddev_is_dm(conf->mddev))
+                       mddev_add_trace_msg(conf->mddev, "raid5 rcw %llu %d %d %d",
+                               sh->sector, rcw, qread,
+                               test_bit(STRIPE_DELAYED, &sh->state));
        }
 
        if (rcw > disks && rmw > disks &&
@@ -5686,7 +5685,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
        }
        release_inactive_stripe_list(conf, cb->temp_inactive_list,
                                     NR_STRIPE_HASH_LOCKS);
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev))
                trace_block_unplug(mddev->queue, cnt, !from_schedule);
        kfree(cb);
 }
@@ -7960,7 +7959,7 @@ static int raid5_run(struct mddev *mddev)
                        mdname(mddev));
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                int chunk_size;
                /* read-ahead size must cover two whole stripes, which
                 * is 2 * (datadisks) * chunksize where 'n' is the
@@ -8564,7 +8563,7 @@ static void end_reshape(struct r5conf *conf)
                spin_unlock_irq(&conf->device_lock);
                wake_up(&conf->wait_for_overlap);
 
-               if (conf->mddev->queue)
+               if (!mddev_is_dm(conf->mddev))
                        raid5_set_io_opt(conf);
        }
 }