md: change the return value type of md_write_start to void
author Li Nan <linan122@huawei.com>
Sat, 25 May 2024 18:52:56 +0000 (02:52 +0800)
committer Song Liu <song@kernel.org>
Mon, 10 Jun 2024 19:10:25 +0000 (19:10 +0000)
Commit cc27b0c78c79 ("md: fix deadlock between mddev_suspend() and
md_write_start()") made md_write_start() abort and return false when
the mddev is suspended, which fixed a deadlock that occurred when
mddev_suspend() was called while holding reconfig_mutex. Since
mddev_suspend() now includes lockdep_assert_not_held(), it can no
longer be called with reconfig_mutex held, so the abort is no longer
needed. Remove the abort and change the function's return type to
void.
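
For reference, a minimal sketch (simplified, not the verbatim upstream
code) of the assertion that rules out the old deadlock; mddev_suspend()
in drivers/md/md.c now begins roughly as follows:

    int mddev_suspend(struct mddev *mddev, bool interruptible)
    {
            /*
             * With lockdep enabled this warns if the caller holds
             * reconfig_mutex, so a suspend can never be issued under
             * that lock and the abort path in md_write_start() has
             * nothing left to guard against.
             */
            lockdep_assert_not_held(&mddev->reconfig_mutex);

            /* ... freeze writes and wait for in-flight normal I/O ... */
            return 0;
    }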

Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240525185257.3896201-2-linan666@huaweicloud.com
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09c55d9..6bac20e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8640,12 +8640,10 @@ EXPORT_SYMBOL(md_done_sync);
- * A return value of 'false' means that the write wasn't recorded
- * and cannot proceed as the array is being suspend.
  */
-bool md_write_start(struct mddev *mddev, struct bio *bi)
+void md_write_start(struct mddev *mddev, struct bio *bi)
 {
        int did_change = 0;
 
        if (bio_data_dir(bi) != WRITE)
-               return true;
+               return;
 
        BUG_ON(mddev->ro == MD_RDONLY);
        if (mddev->ro == MD_AUTO_READ) {
@@ -8678,15 +8678,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        if (!mddev->has_superblocks)
-               return true;
+               return;
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
-                  is_md_suspended(mddev));
-       if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
-               percpu_ref_put(&mddev->writes_pending);
-               return false;
-       }
-       return true;
+                  !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ca085ec..4875820 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -785,7 +785,7 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t
 extern void md_wakeup_thread(struct md_thread __rcu *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
-extern bool md_write_start(struct mddev *mddev, struct bio *bi);
+extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
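
With the failure path gone, the caller-side contract is simpler. A
hedged sketch of the intended pairing in a personality's make_request
path (identifiers as declared above, control flow simplified):

    md_write_start(mddev, bio);  /* takes a writes_pending reference and
                                    may sleep on a pending superblock
                                    update; it can no longer fail, so
                                    callers have nothing to check */
    /* ... build and submit the write ... */
    md_write_end(mddev);         /* drops the writes_pending reference,
                                    typically from the bio completion
                                    path */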
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b8a71c..0d80ff4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1687,8 +1687,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
        if (bio_data_dir(bio) == READ)
                raid1_read_request(mddev, bio, sectors, NULL);
        else {
-               if (!md_write_start(mddev,bio))
-                       return false;
+               md_write_start(mddev, bio);
                raid1_write_request(mddev, bio, sectors);
        }
        return true;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a4556d2..f8d7c02 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
            && md_flush_request(mddev, bio))
                return true;
 
-       if (!md_write_start(mddev, bio))
-               return false;
+       md_write_start(mddev, bio);
 
        if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                if (!raid10_handle_discard(mddev, bio))
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bd1ce9..a843893 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6078,8 +6078,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
        }
 
-       if (!md_write_start(mddev, bi))
-               return false;
+       md_write_start(mddev, bi);
        /*
         * If array is degraded, better not do chunk aligned read because
         * later we might have to read it again in order to reconstruct