xfs: replace xfs_sb_version checks with feature flag checks
[linux-2.6-microblaze.git] / fs / xfs / xfs_super.c
index 2c9e26a..5fc7f7d 100644 (file)
@@ -49,6 +49,28 @@ static struct kset *xfs_kset;                /* top-level xfs sysfs dir */
 static struct xfs_kobj xfs_dbg_kobj;   /* global debug sysfs attrs */
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Global list of all mounted XFS filesystems, walked by the CPU-dead
+ * callback (xfs_cpu_dead) so per-mount percpu state can be cleaned up
+ * when a CPU goes offline.  Protected by xfs_mount_list_lock.
+ */
+static LIST_HEAD(xfs_mount_list);
+static DEFINE_SPINLOCK(xfs_mount_list_lock);
+
+/* Add @mp to the global mount list under the list lock. */
+static inline void xfs_mount_list_add(struct xfs_mount *mp)
+{
+       spin_lock(&xfs_mount_list_lock);
+       list_add(&mp->m_mount_list, &xfs_mount_list);
+       spin_unlock(&xfs_mount_list_lock);
+}
+
+/* Remove @mp from the global mount list under the list lock. */
+static inline void xfs_mount_list_del(struct xfs_mount *mp)
+{
+       spin_lock(&xfs_mount_list_lock);
+       list_del(&mp->m_mount_list);
+       spin_unlock(&xfs_mount_list_lock);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+/* Without CPU hotplug there is no CPU-dead callback, so no list to keep. */
+static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
+static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
+#endif
+
 enum xfs_dax_mode {
        XFS_DAX_INODE = 0,
        XFS_DAX_ALWAYS = 1,
@@ -201,25 +223,20 @@ xfs_fs_show_options(
                seq_printf(m, ",swidth=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 
-       if (mp->m_qflags & XFS_UQUOTA_ACCT) {
-               if (mp->m_qflags & XFS_UQUOTA_ENFD)
-                       seq_puts(m, ",usrquota");
-               else
-                       seq_puts(m, ",uqnoenforce");
-       }
+       if (mp->m_qflags & XFS_UQUOTA_ENFD)
+               seq_puts(m, ",usrquota");
+       else if (mp->m_qflags & XFS_UQUOTA_ACCT)
+               seq_puts(m, ",uqnoenforce");
 
-       if (mp->m_qflags & XFS_PQUOTA_ACCT) {
-               if (mp->m_qflags & XFS_PQUOTA_ENFD)
-                       seq_puts(m, ",prjquota");
-               else
-                       seq_puts(m, ",pqnoenforce");
-       }
-       if (mp->m_qflags & XFS_GQUOTA_ACCT) {
-               if (mp->m_qflags & XFS_GQUOTA_ENFD)
-                       seq_puts(m, ",grpquota");
-               else
-                       seq_puts(m, ",gqnoenforce");
-       }
+       if (mp->m_qflags & XFS_PQUOTA_ENFD)
+               seq_puts(m, ",prjquota");
+       else if (mp->m_qflags & XFS_PQUOTA_ACCT)
+               seq_puts(m, ",pqnoenforce");
+
+       if (mp->m_qflags & XFS_GQUOTA_ENFD)
+               seq_puts(m, ",grpquota");
+       else if (mp->m_qflags & XFS_GQUOTA_ACCT)
+               seq_puts(m, ",gqnoenforce");
 
        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, ",noquota");
@@ -468,7 +485,7 @@ xfs_setup_devices(
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                unsigned int    log_sector_size = BBSIZE;
 
-               if (xfs_sb_version_hassector(&mp->m_sb))
+               if (xfs_has_sector(mp))
                        log_sector_size = mp->m_sb.sb_logsectsize;
                error = xfs_setsize_buftarg(mp->m_logdev_targp,
                                            log_sector_size);
@@ -501,37 +518,37 @@ xfs_init_mount_workqueues(
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_buf;
 
-       mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
-                       XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
-                       0, mp->m_super->s_id);
-       if (!mp->m_cil_workqueue)
-               goto out_destroy_unwritten;
-
        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
                        XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
                        0, mp->m_super->s_id);
        if (!mp->m_reclaim_workqueue)
-               goto out_destroy_cil;
+               goto out_destroy_unwritten;
 
-       mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
-                       WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
+       mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
+                       XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
                        0, mp->m_super->s_id);
-       if (!mp->m_gc_workqueue)
+       if (!mp->m_blockgc_wq)
                goto out_destroy_reclaim;
 
+       mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
+                       XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
+                       1, mp->m_super->s_id);
+       if (!mp->m_inodegc_wq)
+               goto out_destroy_blockgc;
+
        mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
                        XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
        if (!mp->m_sync_workqueue)
-               goto out_destroy_eofb;
+               goto out_destroy_inodegc;
 
        return 0;
 
-out_destroy_eofb:
-       destroy_workqueue(mp->m_gc_workqueue);
+out_destroy_inodegc:
+       destroy_workqueue(mp->m_inodegc_wq);
+out_destroy_blockgc:
+       destroy_workqueue(mp->m_blockgc_wq);
 out_destroy_reclaim:
        destroy_workqueue(mp->m_reclaim_workqueue);
-out_destroy_cil:
-       destroy_workqueue(mp->m_cil_workqueue);
 out_destroy_unwritten:
        destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_buf:
@@ -545,9 +562,9 @@ xfs_destroy_mount_workqueues(
        struct xfs_mount        *mp)
 {
        destroy_workqueue(mp->m_sync_workqueue);
-       destroy_workqueue(mp->m_gc_workqueue);
+       destroy_workqueue(mp->m_blockgc_wq);
+       destroy_workqueue(mp->m_inodegc_wq);
        destroy_workqueue(mp->m_reclaim_workqueue);
-       destroy_workqueue(mp->m_cil_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
        destroy_workqueue(mp->m_buf_workqueue);
 }
@@ -596,32 +613,6 @@ xfs_fs_alloc_inode(
        return NULL;
 }
 
-#ifdef DEBUG
-static void
-xfs_check_delalloc(
-       struct xfs_inode        *ip,
-       int                     whichfork)
-{
-       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
-       struct xfs_bmbt_irec    got;
-       struct xfs_iext_cursor  icur;
-
-       if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
-               return;
-       do {
-               if (isnullstartblock(got.br_startblock)) {
-                       xfs_warn(ip->i_mount,
-       "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
-                               ip->i_ino,
-                               whichfork == XFS_DATA_FORK ? "data" : "cow",
-                               got.br_startoff, got.br_blockcount);
-               }
-       } while (xfs_iext_next_extent(ifp, &icur, &got));
-}
-#else
-#define xfs_check_delalloc(ip, whichfork)      do { } while (0)
-#endif
-
 /*
  * Now that the generic code is guaranteed not to be accessing
  * the linux inode, we can inactivate and reclaim the inode.
@@ -637,30 +628,6 @@ xfs_fs_destroy_inode(
        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        XFS_STATS_INC(ip->i_mount, vn_rele);
        XFS_STATS_INC(ip->i_mount, vn_remove);
-
-       xfs_inactive(ip);
-
-       if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
-               xfs_check_delalloc(ip, XFS_DATA_FORK);
-               xfs_check_delalloc(ip, XFS_COW_FORK);
-               ASSERT(0);
-       }
-
-       XFS_STATS_INC(ip->i_mount, vn_reclaim);
-
-       /*
-        * We should never get here with one of the reclaim flags already set.
-        */
-       ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-       ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
-
-       /*
-        * We always use background reclaim here because even if the inode is
-        * clean, it still may be under IO and hence we have wait for IO
-        * completion to occur before we can reclaim the inode. The background
-        * reclaim path handles this more efficiently than we can here, so
-        * simply let background reclaim tear down all inodes.
-        */
        xfs_inode_mark_reclaimable(ip);
 }
 
@@ -734,7 +701,7 @@ xfs_fs_drop_inode(
         * that.  See the comment for this inode flag.
         */
        if (ip->i_flags & XFS_IRECOVERY) {
-               ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
+               ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
                return 0;
        }
 
@@ -757,6 +724,8 @@ xfs_fs_sync_fs(
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
+       trace_xfs_fs_sync_fs(mp, __return_address);
+
        /*
         * Doing anything during the async pass would be counterproductive.
         */
@@ -773,6 +742,25 @@ xfs_fs_sync_fs(
                flush_delayed_work(&mp->m_log->l_work);
        }
 
+       /*
+        * If we are called with page faults frozen out, it means we are about
+        * to freeze the transaction subsystem. Take the opportunity to shut
+        * down inodegc because once SB_FREEZE_FS is set it's too late to
+        * prevent inactivation races with freeze. The fs doesn't get called
+        * again by the freezing process until after SB_FREEZE_FS has been set,
+        * so it's now or never.  Same logic applies to speculative allocation
+        * garbage collection.
+        *
+        * We don't care if this is a normal syncfs call that does this or
+        * freeze that does this - we can run this multiple times without issue
+        * and we won't race with a restart because a restart can only occur
+        * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
+        */
+       if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
+               xfs_inodegc_stop(mp);
+               xfs_blockgc_stop(mp);
+       }
+
        return 0;
 }
 
@@ -791,6 +779,9 @@ xfs_fs_statfs(
        xfs_extlen_t            lsize;
        int64_t                 ffree;
 
+       /* Wait for whatever inactivations are in progress. */
+       xfs_inodegc_flush(mp);
+
        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;
 
@@ -886,10 +877,22 @@ xfs_fs_freeze(
         * set a GFP_NOFS context here to avoid recursion deadlocks.
         */
        flags = memalloc_nofs_save();
-       xfs_blockgc_stop(mp);
        xfs_save_resvblks(mp);
        ret = xfs_log_quiesce(mp);
        memalloc_nofs_restore(flags);
+
+       /*
+        * For read-write filesystems, we need to restart the inodegc on error
+        * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
+        * going to be run to restart it now.  We are at SB_FREEZE_FS level
+        * here, so we can restart safely without racing with a stop in
+        * xfs_fs_sync_fs().
+        */
+       if (ret && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               xfs_blockgc_start(mp);
+               xfs_inodegc_start(mp);
+       }
+
        return ret;
 }
 
@@ -901,7 +904,18 @@ xfs_fs_unfreeze(
 
        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);
-       xfs_blockgc_start(mp);
+
+       /*
+        * Don't reactivate the inodegc worker on a readonly filesystem because
+        * inodes are sent directly to reclaim.  Don't reactivate the blockgc
+        * worker because there are no speculative preallocations on a readonly
+        * filesystem.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               xfs_blockgc_start(mp);
+               xfs_inodegc_start(mp);
+       }
+
        return 0;
 }
 
@@ -916,7 +930,7 @@ xfs_finish_flags(
        int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
 
        /* Fail a mount where the logbuf is smaller than the log stripe */
-       if (xfs_sb_version_haslogv2(&mp->m_sb)) {
+       if (xfs_has_logv2(mp)) {
                if (mp->m_logbsize <= 0 &&
                    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
@@ -938,21 +952,13 @@ xfs_finish_flags(
        /*
         * V5 filesystems always use attr2 format for attributes.
         */
-       if (xfs_sb_version_hascrc(&mp->m_sb) &&
+       if (xfs_has_crc(mp) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
                             "attr2 is always enabled for V5 filesystems.");
                return -EINVAL;
        }
 
-       /*
-        * mkfs'ed attr2 will turn on attr2 mount unless explicitly
-        * told by noattr2 to turn it off
-        */
-       if (xfs_sb_version_hasattr2(&mp->m_sb) &&
-           !(mp->m_flags & XFS_MOUNT_NOATTR2))
-               mp->m_flags |= XFS_MOUNT_ATTR2;
-
        /*
         * prohibit r/w mounts of read-only filesystems
         */
@@ -962,9 +968,9 @@ xfs_finish_flags(
                return -EROFS;
        }
 
-       if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
-           (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
-           !xfs_sb_version_has_pquotino(&mp->m_sb)) {
+       if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
+           (mp->m_qflags & XFS_PQUOTA_ACCT) &&
+           !xfs_has_pquotino(mp)) {
                xfs_warn(mp,
                  "Super block does not support project and group quota together");
                return -EINVAL;
@@ -1027,6 +1033,35 @@ xfs_destroy_percpu_counters(
        percpu_counter_destroy(&mp->m_delalloc_blks);
 }
 
+/*
+ * Allocate and initialise the per-cpu inodegc structures: an empty
+ * lockless list, a zeroed item count, and a work item that runs
+ * xfs_inodegc_worker.  Returns 0 on success or -ENOMEM if the percpu
+ * allocation fails.
+ */
+static int
+xfs_inodegc_init_percpu(
+       struct xfs_mount        *mp)
+{
+       struct xfs_inodegc      *gc;
+       int                     cpu;
+
+       mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
+       if (!mp->m_inodegc)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               gc = per_cpu_ptr(mp->m_inodegc, cpu);
+               init_llist_head(&gc->list);
+               gc->items = 0;
+               INIT_WORK(&gc->work, xfs_inodegc_worker);
+       }
+       return 0;
+}
+
+/*
+ * Free the per-cpu inodegc structures.  Safe to call even when
+ * xfs_inodegc_init_percpu() was never run or failed, hence the NULL
+ * check before freeing.
+ */
+static void
+xfs_inodegc_free_percpu(
+       struct xfs_mount        *mp)
+{
+       if (!mp->m_inodegc)
+               return;
+       free_percpu(mp->m_inodegc);
+}
+
 static void
 xfs_fs_put_super(
        struct super_block      *sb)
@@ -1043,6 +1078,8 @@ xfs_fs_put_super(
 
        xfs_freesb(mp);
        free_percpu(mp->m_stats.xs_stats);
+       xfs_mount_list_del(mp);
+       xfs_inodegc_free_percpu(mp);
        xfs_destroy_percpu_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);
@@ -1228,35 +1265,31 @@ xfs_fs_parse_param(
        case Opt_noquota:
                parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
                parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
-               parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
                return 0;
        case Opt_quota:
        case Opt_uquota:
        case Opt_usrquota:
-               parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
-                                XFS_UQUOTA_ENFD);
+               parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
                return 0;
        case Opt_qnoenforce:
        case Opt_uqnoenforce:
-               parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+               parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
                parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
                return 0;
        case Opt_pquota:
        case Opt_prjquota:
-               parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
-                                XFS_PQUOTA_ENFD);
+               parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
                return 0;
        case Opt_pqnoenforce:
-               parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
+               parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
                parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
                return 0;
        case Opt_gquota:
        case Opt_grpquota:
-               parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
-                                XFS_GQUOTA_ENFD);
+               parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
                return 0;
        case Opt_gqnoenforce:
-               parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+               parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
                parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
                return 0;
        case Opt_discard:
@@ -1288,7 +1321,6 @@ xfs_fs_parse_param(
                return 0;
        case Opt_noattr2:
                xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
-               parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
                parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
                return 0;
        default:
@@ -1312,6 +1344,13 @@ xfs_fs_validate_params(
                return -EINVAL;
        }
 
+       if ((mp->m_flags & (XFS_MOUNT_ATTR2|XFS_MOUNT_NOATTR2)) ==
+                         (XFS_MOUNT_ATTR2|XFS_MOUNT_NOATTR2)) {
+               xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
+               return -EINVAL;
+       }
+
        if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
            (mp->m_dalign || mp->m_swidth)) {
                xfs_warn(mp,
@@ -1418,11 +1457,22 @@ xfs_fs_fill_super(
        if (error)
                goto out_destroy_workqueues;
 
+       error = xfs_inodegc_init_percpu(mp);
+       if (error)
+               goto out_destroy_counters;
+
+       /*
+        * All percpu data structures requiring cleanup when a cpu goes offline
+        * must be allocated before adding this @mp to the cpu-dead handler's
+        * mount list.
+        */
+       xfs_mount_list_add(mp);
+
        /* Allocate stats memory before we do operations that might use it */
        mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
        if (!mp->m_stats.xs_stats) {
                error = -ENOMEM;
-               goto out_destroy_counters;
+               goto out_destroy_inodegc;
        }
 
        error = xfs_readsb(mp, flags);
@@ -1438,7 +1488,7 @@ xfs_fs_fill_super(
                goto out_free_sb;
 
        /* V4 support is undergoing deprecation. */
-       if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+       if (!xfs_has_crc(mp)) {
 #ifdef CONFIG_XFS_SUPPORT_V4
                xfs_warn_once(mp,
        "Deprecated V4 format (crc=0) will not be supported after September 2030.");
@@ -1523,7 +1573,7 @@ xfs_fs_fill_super(
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_max_links = XFS_MAXLINK;
        sb->s_time_gran = 1;
-       if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
+       if (xfs_has_bigtime(mp)) {
                sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
                sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
        } else {
@@ -1539,10 +1589,6 @@ xfs_fs_fill_super(
        if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
                sb->s_flags |= SB_I_VERSION;
 
-       if (xfs_sb_version_hasbigtime(&mp->m_sb))
-               xfs_warn(mp,
- "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");
-
        if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
                bool rtdev_is_dax = false, datadev_is_dax;
 
@@ -1559,7 +1605,7 @@ xfs_fs_fill_super(
                        "DAX unsupported by block device. Turning off DAX.");
                        xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
                }
-               if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+               if (xfs_has_reflink(mp)) {
                        xfs_alert(mp,
                "DAX and reflink cannot be used together!");
                        error = -EINVAL;
@@ -1577,7 +1623,7 @@ xfs_fs_fill_super(
                }
        }
 
-       if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+       if (xfs_has_reflink(mp)) {
                if (mp->m_sb.sb_rblocks) {
                        xfs_alert(mp,
        "reflink not compatible with realtime device!");
@@ -1591,17 +1637,13 @@ xfs_fs_fill_super(
                }
        }
 
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
+       if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
                xfs_alert(mp,
        "reverse mapping btree not compatible with realtime device!");
                error = -EINVAL;
                goto out_filestream_unmount;
        }
 
-       if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
-               xfs_warn(mp,
- "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");
-
        error = xfs_mountfs(mp);
        if (error)
                goto out_filestream_unmount;
@@ -1625,6 +1667,9 @@ xfs_fs_fill_super(
        xfs_freesb(mp);
  out_free_stats:
        free_percpu(mp->m_stats.xs_stats);
+ out_destroy_inodegc:
+       xfs_mount_list_del(mp);
+       xfs_inodegc_free_percpu(mp);
  out_destroy_counters:
        xfs_destroy_percpu_counters(mp);
  out_destroy_workqueues:
@@ -1708,6 +1753,9 @@ xfs_remount_rw(
        if (error && error != -ENOSPC)
                return error;
 
+       /* Re-enable the background inode inactivation worker. */
+       xfs_inodegc_start(mp);
+
        return 0;
 }
 
@@ -1730,6 +1778,15 @@ xfs_remount_ro(
                return error;
        }
 
+       /*
+        * Stop the inodegc background worker.  xfs_fs_reconfigure already
+        * flushed all pending inodegc work when it sync'd the filesystem.
+        * The VFS holds s_umount, so we know that inodes cannot enter
+        * xfs_fs_destroy_inode during a remount operation.  In readonly mode
+        * we send inodes straight to reclaim, so no inodes will be queued.
+        */
+       xfs_inodegc_stop(mp);
+
        /* Free the per-AG metadata reservation pool. */
        error = xfs_fs_unreserve_ag_blocks(mp);
        if (error) {
@@ -2120,6 +2177,48 @@ xfs_destroy_workqueues(void)
        destroy_workqueue(xfs_alloc_wq);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * CPU-dead hotplug callback: walk every mounted filesystem on
+ * xfs_mount_list and let it migrate per-cpu inodegc state off the
+ * now-offline @cpu.
+ *
+ * The list lock is dropped around xfs_inodegc_cpu_dead() because that
+ * call may block.  NOTE(review): this relies on the saved next pointer
+ * (@n) remaining on the list while the lock is dropped -- confirm that
+ * a concurrent unmount (xfs_mount_list_del) cannot free @n here.
+ */
+static int
+xfs_cpu_dead(
+       unsigned int            cpu)
+{
+       struct xfs_mount        *mp, *n;
+
+       spin_lock(&xfs_mount_list_lock);
+       list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
+               spin_unlock(&xfs_mount_list_lock);
+               xfs_inodegc_cpu_dead(mp, cpu);
+               spin_lock(&xfs_mount_list_lock);
+       }
+       spin_unlock(&xfs_mount_list_lock);
+       return 0;
+}
+
+/*
+ * Register the CPUHP_XFS_DEAD hotplug state with xfs_cpu_dead as the
+ * teardown callback and no startup callback.  Registration failure is
+ * fatal for the module (caller aborts init), hence the alert.
+ */
+static int __init
+xfs_cpu_hotplug_init(void)
+{
+       int     error;
+
+       error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
+                       xfs_cpu_dead);
+       if (error < 0)
+               xfs_alert(NULL,
+"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
+                       error);
+       return error;
+}
+
+/* Unregister the CPUHP_XFS_DEAD state at module exit or on init failure. */
+static void
+xfs_cpu_hotplug_destroy(void)
+{
+       cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static inline int xfs_cpu_hotplug_init(void) { return 0; }
+static inline void xfs_cpu_hotplug_destroy(void) {}
+#endif
+
 STATIC int __init
 init_xfs_fs(void)
 {
@@ -2132,10 +2231,14 @@ init_xfs_fs(void)
 
        xfs_dir_startup();
 
-       error = xfs_init_zones();
+       error = xfs_cpu_hotplug_init();
        if (error)
                goto out;
 
+       error = xfs_init_zones();
+       if (error)
+               goto out_destroy_hp;
+
        error = xfs_init_workqueues();
        if (error)
                goto out_destroy_zones;
@@ -2215,6 +2318,8 @@ init_xfs_fs(void)
        xfs_destroy_workqueues();
  out_destroy_zones:
        xfs_destroy_zones();
+ out_destroy_hp:
+       xfs_cpu_hotplug_destroy();
  out:
        return error;
 }
@@ -2237,6 +2342,7 @@ exit_xfs_fs(void)
        xfs_destroy_workqueues();
        xfs_destroy_zones();
        xfs_uuid_table_free();
+       xfs_cpu_hotplug_destroy();
 }
 
 module_init(init_xfs_fs);