dm: fix zoned locking imbalance due to needless check in clone_endio
author		Mike Snitzer <snitzer@kernel.org>
		Fri, 10 Jun 2022 19:07:48 +0000 (15:07 -0400)
committer	Mike Snitzer <snitzer@kernel.org>
		Fri, 10 Jun 2022 19:23:54 +0000 (15:23 -0400)
After commit ca522482e3ea ("dm: pass NULL bdev to bio_alloc_clone"),
clone_endio() only calls dm_zone_endio() when a DM target has remapped
the clone bio's bdev to something other than the md->disk->part0 default.

However, if a DM target (e.g. dm-crypt) stacked on top of a dm-zoned
device does not remap the clone bio using bio_set_dev(), then
dm_zone_endio() is not called when the bios complete and the zone
locks are never released. This triggers a hang in dm_zone_map_bio()
when blktests block/004 is run for dm-crypt on zoned block devices.
To avoid the hang, simply remove the clone_endio() check that verifies
the target remapped the clone bio to a device other than the default.

Fixes: ca522482e3ea ("dm: pass NULL bdev to bio_alloc_clone")
Reported-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
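
To make the locking imbalance concrete: DM's zoned support takes a
per-zone lock when a zoned write is mapped in dm_zone_map_bio() and
releases it when that write's clone completes via dm_zone_endio(). The
sketch below only illustrates that pairing; zone_write_lock(),
zone_write_unlock() and the *_sketch() wrappers are hypothetical
stand-ins, not the actual dm-zone.c implementation.

/*
 * Illustrative sketch only: zone_write_lock()/zone_write_unlock() are
 * hypothetical stand-ins for the real per-zone serialization in dm-zone.c.
 */
#include <linux/blkdev.h>		/* bio_zone_no() */
#include <linux/device-mapper.h>	/* DM_MAPIO_REMAPPED */
#include "dm-core.h"			/* struct dm_io */

static int dm_zone_map_bio_sketch(struct dm_io *io)
{
	/* Lock the zone targeted by this write before dispatching it ... */
	zone_write_lock(io->md, bio_zone_no(io->orig_bio));
	/* ... then remap and submit the clone bio. */
	return DM_MAPIO_REMAPPED;
}

static void dm_zone_endio_sketch(struct dm_io *io, struct bio *clone)
{
	/*
	 * ... and release it only when the clone completes.  If clone_endio()
	 * skips this step (as it did for targets that never remapped the
	 * clone's bdev), the zone stays locked and the next write to it waits
	 * forever in dm_zone_map_bio().
	 */
	zone_write_unlock(io->md, bio_zone_no(clone));
}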
drivers/md/dm.c

index 8b21155..d8f1618 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1016,23 +1016,19 @@ static void clone_endio(struct bio *bio)
        struct dm_io *io = tio->io;
        struct mapped_device *md = io->md;
 
-       if (likely(bio->bi_bdev != md->disk->part0)) {
-               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
-               if (unlikely(error == BLK_STS_TARGET)) {
-                       if (bio_op(bio) == REQ_OP_DISCARD &&
-                           !bdev_max_discard_sectors(bio->bi_bdev))
-                               disable_discard(md);
-                       else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                                !q->limits.max_write_zeroes_sectors)
-                               disable_write_zeroes(md);
-               }
-
-               if (static_branch_unlikely(&zoned_enabled) &&
-                   unlikely(blk_queue_is_zoned(q)))
-                       dm_zone_endio(io, bio);
+       if (unlikely(error == BLK_STS_TARGET)) {
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bdev_max_discard_sectors(bio->bi_bdev))
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bdev_write_zeroes_sectors(bio->bi_bdev))
+                       disable_write_zeroes(md);
        }
 
+       if (static_branch_unlikely(&zoned_enabled) &&
+           unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+               dm_zone_endio(io, bio);
+
        if (endio) {
                int r = endio(ti, bio, &error);
                switch (r) {
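
For readability, here is how the affected portion of clone_endio()
reads once the hunk above is applied (reconstructed from the patch;
the surrounding function body is elided):

	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	/*
	 * BLK_STS_TARGET handling now runs for every clone, not just those
	 * whose bdev a target remapped away from md->disk->part0.
	 */
	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	/*
	 * Likewise, dm_zone_endio() is now called for every bio completing
	 * on a zoned queue, so the zone locking stays balanced even when the
	 * target (e.g. dm-crypt) did not call bio_set_dev() on the clone.
	 */
	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
		dm_zone_endio(io, bio);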