ceph: add new RECOVER mount_state when recovering session
author     Jeff Layton <jlayton@kernel.org>
           Fri, 25 Sep 2020 11:55:39 +0000 (07:55 -0400)
committer  Ilya Dryomov <idryomov@gmail.com>
           Mon, 14 Dec 2020 22:21:46 +0000 (23:21 +0100)
When recovering a session (a la recover_session=clean), we want to do
all of the operations that we do on a forced umount, but changing the
mount state to SHUTDOWN can cause queued MDS requests to fail when the
session comes back. In this situation, most of those requests can
simply idle until the session is recovered.
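
A rough sketch of the failure mode, using a hypothetical helper name and
simplified logic (the real checks live in the MDS request and cap paths):

	/*
	 * Sketch only, not the literal kernel code.  While the mount
	 * state reads SHUTDOWN, a pending MDS request is failed with
	 * -EIO instead of being left to wait for the session that
	 * recover_session=clean is about to re-establish.
	 */
	static int sketch_check_mds_request(struct ceph_fs_client *fsc)
	{
		if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			return -EIO;	/* forced umount: fail immediately */

		return 0;		/* otherwise idle until the session is back */
	}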

Reserve the SHUTDOWN state for forced umount, and add a new RECOVER
state for the forced-reconnect case. Change the existing tests for
equality with SHUTDOWN to instead test for SHUTDOWN or RECOVER.
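
Because RECOVER is defined directly after SHUTDOWN, a single
">= CEPH_MOUNT_SHUTDOWN" comparison covers both states. A minimal
standalone illustration (userspace only, earlier enum members elided):

	#include <stdio.h>

	enum {
		/* earlier mount states omitted for brevity */
		CEPH_MOUNT_UNMOUNTING,
		CEPH_MOUNT_UNMOUNTED,
		CEPH_MOUNT_SHUTDOWN,
		CEPH_MOUNT_RECOVER,	/* new: session recovery, not forced umount */
	};

	int main(void)
	{
		int s;

		for (s = CEPH_MOUNT_UNMOUNTING; s <= CEPH_MOUNT_RECOVER; s++)
			printf("state %d: ==SHUTDOWN %d, >=SHUTDOWN %d\n",
			       s, s == CEPH_MOUNT_SHUTDOWN, s >= CEPH_MOUNT_SHUTDOWN);
		return 0;
	}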

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/super.c
include/linux/ceph/libceph.h

fs/ceph/addr.c
index 35c83f6..e10b07e 100644
@@ -840,7 +840,7 @@ static int ceph_writepages_start(struct address_space *mapping,
             wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
             (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
-       if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+       if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
                if (ci->i_wrbuffer_ref > 0) {
                        pr_warn_ratelimited(
                                "writepage_start %p %lld forced umount\n",
@@ -1264,7 +1264,7 @@ ceph_find_incompatible(struct page *page)
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+       if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
                dout(" page %p forced umount\n", page);
                return ERR_PTR(-EIO);
        }
fs/ceph/caps.c
index 8552d10..c74d818 100644
@@ -2747,7 +2747,7 @@ again:
                        goto out_unlock;
                }
 
-               if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+               if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
                        dout("get_cap_refs %p forced umount\n", inode);
                        ret = -EIO;
                        goto out_unlock;
fs/ceph/inode.c
index 526faf4..02b11a4 100644
@@ -1888,7 +1888,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
 
        mutex_lock(&ci->i_truncate_mutex);
 
-       if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+       if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
                pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
                                    inode, ceph_ino(inode));
                mapping_set_error(inode->i_mapping, -EIO);
fs/ceph/mds_client.c
index 8f1d750..a2d6ef8 100644
@@ -1595,7 +1595,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_cap_flush *cf;
                struct ceph_mds_client *mdsc = fsc->mdsc;
 
-               if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+               if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
                        if (inode->i_data.nrpages > 0)
                                invalidate = true;
                        if (ci->i_wrbuffer_ref > 0)
@@ -4678,7 +4678,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
        u64 want_tid, want_flush;
 
-       if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
+       if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
                return;
 
        dout("sync\n");
fs/ceph/super.c
index 33ba6f0..9b1b7f4 100644
@@ -831,6 +831,13 @@ static void destroy_caches(void)
        ceph_fscache_unregister();
 }
 
+static void __ceph_umount_begin(struct ceph_fs_client *fsc)
+{
+       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
+       ceph_mdsc_force_umount(fsc->mdsc);
+       fsc->filp_gen++; // invalidate open files
+}
+
 /*
  * ceph_umount_begin - initiate forced umount.  Tear down the
  * mount, skipping steps that may hang while waiting for server(s).
@@ -843,9 +850,7 @@ static void ceph_umount_begin(struct super_block *sb)
        if (!fsc)
                return;
        fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
-       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
-       ceph_mdsc_force_umount(fsc->mdsc);
-       fsc->filp_gen++; // invalidate open files
+       __ceph_umount_begin(fsc);
 }
 
 static const struct super_operations ceph_super_ops = {
@@ -1234,7 +1239,8 @@ int ceph_force_reconnect(struct super_block *sb)
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;
 
-       ceph_umount_begin(sb);
+       fsc->mount_state = CEPH_MOUNT_RECOVER;
+       __ceph_umount_begin(fsc);
 
        /* Make sure all page caches get invalidated.
         * see remove_session_caps_cb() */
include/linux/ceph/libceph.h
index c8645f0..eb5a7ca 100644
@@ -104,6 +104,7 @@ enum {
        CEPH_MOUNT_UNMOUNTING,
        CEPH_MOUNT_UNMOUNTED,
        CEPH_MOUNT_SHUTDOWN,
+       CEPH_MOUNT_RECOVER,
 };
 
 static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)