btrfs: qgroup: add simple quotas (squota) mode
[linux-2.6-microblaze.git] / fs/btrfs/qgroup.c
index 0771633..edb84cc 100644
 #include "root-tree.h"
 #include "tree-checker.h"
 
+enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
+{
+       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+               return BTRFS_QGROUP_MODE_DISABLED;
+       if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
+               return BTRFS_QGROUP_MODE_SIMPLE;
+       return BTRFS_QGROUP_MODE_FULL;
+}
+
+bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
+{
+       return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
+}
+
+bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
+{
+       return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
+}
+
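
The three helpers above reduce the quota state to an explicit mode: disabled while the BTRFS_FS_QUOTA_ENABLED bit is clear, simple when the SIMPLE_MODE status flag is set, and full otherwise. The rest of the diff then gates the heavyweight paths on btrfs_qgroup_full_accounting() and the cheaper checks on btrfs_qgroup_enabled(). Below is a minimal userspace-style sketch of that derivation; the struct and bit values are simplified stand-ins for illustration, not the kernel definitions.

```c
/* Illustration only: simplified model of the three qgroup modes. */
#include <stdio.h>

#define QUOTA_ENABLED_BIT  (1u << 0)     /* stand-in for BTRFS_FS_QUOTA_ENABLED */
#define STATUS_SIMPLE_MODE (1ull << 6)   /* stand-in for ..._STATUS_FLAG_SIMPLE_MODE */

enum qgroup_mode { MODE_DISABLED, MODE_SIMPLE, MODE_FULL };

struct fake_fs_info {
	unsigned int flags;
	unsigned long long qgroup_flags;
};

static enum qgroup_mode qgroup_mode(const struct fake_fs_info *fs)
{
	if (!(fs->flags & QUOTA_ENABLED_BIT))
		return MODE_DISABLED;
	if (fs->qgroup_flags & STATUS_SIMPLE_MODE)
		return MODE_SIMPLE;
	return MODE_FULL;
}

int main(void)
{
	struct fake_fs_info fs = {
		.flags = QUOTA_ENABLED_BIT,
		.qgroup_flags = STATUS_SIMPLE_MODE,
	};

	/* Full accounting runs only in MODE_FULL; simple mode skips it. */
	printf("full accounting: %s\n",
	       qgroup_mode(&fs) == MODE_FULL ? "yes" : "no");
	return 0;
}
```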
 /*
  * Helpers to access qgroup reservation
  *
@@ -343,11 +362,22 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 
 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
 {
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+               return;
        fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
                                  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
                                  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
 }
 
+static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
+                                  struct extent_buffer *leaf, int slot,
+                                  struct btrfs_qgroup_status_item *ptr)
+{
+       ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+       ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
+       fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
+}
+
 /*
  * The full config is read in one go, only called from open_ctree()
  * It doesn't use any locking, as at this point we're still single-threaded
@@ -364,7 +394,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
        u64 flags = 0;
        u64 rescan_progress = 0;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!fs_info->quota_root)
                return 0;
 
        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
@@ -414,14 +444,14 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
-                       if (btrfs_qgroup_status_generation(l, ptr) !=
-                           fs_info->generation) {
+                       fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
+                       if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
+                               qgroup_read_enable_gen(fs_info, l, slot, ptr);
+                       } else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
                                qgroup_mark_inconsistent(fs_info);
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
-                       fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
-                                                                         ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }
@@ -536,13 +566,12 @@ next2:
 out:
        btrfs_free_path(path);
        fs_info->qgroup_flags |= flags;
-       if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
-               clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-       else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
-                ret >= 0)
-               ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
-
-       if (ret < 0) {
+       if (ret >= 0) {
+               if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
+                       set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+               if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+                       ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
+       } else {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
@@ -564,7 +593,7 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
        struct rb_node *node;
        bool ret = false;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
                return ret;
        /*
         * Since we're unmounting, there is no race and no need to grab qgroup
@@ -963,7 +992,8 @@ out:
        return ret;
 }
 
-int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+                      struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
 {
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
@@ -976,6 +1006,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
        struct btrfs_qgroup *prealloc = NULL;
        struct btrfs_trans_handle *trans = NULL;
        struct ulist *ulist = NULL;
+       const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
        int ret = 0;
        int slot;
 
@@ -1078,8 +1109,13 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
                                 struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
-       fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
-                               BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+       fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
+       if (simple) {
+               fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+               btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
+       } else {
+               fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+       }
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
                                      BTRFS_QGROUP_STATUS_FLAGS_MASK);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
@@ -1183,6 +1219,8 @@ out_add_root:
                goto out_free_path;
        }
 
+       fs_info->qgroup_enable_gen = trans->transid;
+
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        /*
         * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
@@ -1207,8 +1245,14 @@ out_add_root:
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+       if (simple)
+               btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
        spin_unlock(&fs_info->qgroup_lock);
 
+       /* Skip rescan for simple qgroups. */
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+               goto out_free_path;
+
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
                qgroup_rescan_zero_tracking(fs_info);
@@ -1253,6 +1297,38 @@ out:
        return ret;
 }
 
+/*
+ * It is possible to have outstanding ordered extents which reserved bytes
+ * before we disabled. We need to fully flush delalloc, wait for ordered
+ * extents, and commit the transaction to ensure that we don't leak such
+ * reservations, only to have them come back if we re-enable.
+ *
+ * - enable simple quotas
+ * - reserve space
+ * - release it, store rsv_bytes in OE
+ * - disable quotas
+ * - enable simple quotas (qgroup rsv are all 0)
+ * - OE finishes
+ * - run delayed refs
+ * - free rsv_bytes, resulting in miscounting or even underflow
+ */
+static int flush_reservations(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_trans_handle *trans;
+       int ret;
+
+       ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
+       if (ret)
+               return ret;
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       trans = btrfs_join_transaction(fs_info->tree_root);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
+       ret = btrfs_commit_transaction(trans);
+
+       return ret;
+}
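
The scenario listed in the comment above is easiest to see with a toy model: after quotas are disabled and re-enabled, the per-qgroup reserved counters restart at zero, so a reservation still remembered by an ordered extent from before the disable underflows them when it is finally freed. A userspace sketch of that sequence, with a plain counter standing in for the qgroup data reservation (illustration only):

```c
/* Illustration only: model of the stale ordered-extent reservation underflow. */
#include <stdio.h>

int main(void)
{
	long long qgroup_rsv = 0;   /* per-qgroup reserved bytes */
	long long oe_rsv_bytes;     /* bytes remembered by an ordered extent */

	qgroup_rsv += 4096;         /* reserve space for a buffered write */
	oe_rsv_bytes = 4096;        /* release: the ordered extent remembers the amount */

	qgroup_rsv = 0;             /* quotas disabled: counters dropped ... */
	                            /* ... quotas re-enabled: counters start from zero */

	qgroup_rsv -= oe_rsv_bytes; /* ordered extent finishes, frees its old rsv */
	printf("qgroup rsv: %lld\n", qgroup_rsv);  /* -4096: the underflow */
	return 0;
}
```

flush_reservations() avoids this by draining delalloc and ordered extents and committing a transaction before the disable proceeds.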
+
 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
 {
        struct btrfs_root *quota_root;
@@ -1297,6 +1373,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
+       ret = flush_reservations(fs_info);
+       if (ret)
+               goto out_unlock_cleaner;
+
        /*
         * 1 For the root item
         *
@@ -1323,6 +1403,7 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+       fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
        fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
        spin_unlock(&fs_info->qgroup_lock);
 
@@ -1357,7 +1438,8 @@ out:
        if (ret && trans)
                btrfs_end_transaction(trans);
        else if (trans)
-               ret = btrfs_end_transaction(trans);
+               ret = btrfs_commit_transaction(trans);
+out_unlock_cleaner:
        mutex_unlock(&fs_info->cleaner_mutex);
 
        return ret;
@@ -1478,8 +1560,7 @@ out:
        return ret;
 }
 
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
-                             u64 dst)
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_qgroup *parent;
@@ -1618,6 +1699,9 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
        struct btrfs_qgroup *prealloc = NULL;
        int ret = 0;
 
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
+               return 0;
+
        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root) {
                ret = -ENOTCONN;
@@ -1803,6 +1887,9 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;
 
+       if (!btrfs_qgroup_full_accounting(fs_info))
+               return 0;
+
        lockdep_assert_held(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record);
 
@@ -1856,6 +1943,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
        struct btrfs_backref_walk_ctx ctx = { 0 };
        int ret;
 
+       if (!btrfs_qgroup_full_accounting(trans->fs_info))
+               return 0;
        /*
         * We are always called in a context where we are already holding a
         * transaction handle. Often we are called when adding a data delayed
@@ -1924,8 +2013,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
-           || bytenr == 0 || num_bytes == 0)
+       if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
                return 0;
        record = kzalloc(sizeof(*record), GFP_NOFS);
        if (!record)
@@ -1963,7 +2051,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
        u64 bytenr, num_bytes;
 
        /* We can be called directly from walk_up_proc() */
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
 
        for (i = 0; i < nr; i++) {
@@ -2339,7 +2427,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
        int level;
        int ret;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
 
        /* Wrong parameter order */
@@ -2406,7 +2494,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
        BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
        BUG_ON(root_eb == NULL);
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
 
        spin_lock(&fs_info->qgroup_lock);
@@ -2740,7 +2828,7 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
         * If quotas get disabled meanwhile, the resources need to be freed and
         * we can't just exit here.
         */
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+       if (!btrfs_qgroup_full_accounting(fs_info) ||
            fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
                goto out_free;
 
@@ -2809,6 +2897,9 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
        u64 qgroup_to_skip;
        int ret = 0;
 
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+               return 0;
+
        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;
        while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
@@ -2924,7 +3015,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
                        qgroup_mark_inconsistent(fs_info);
                spin_lock(&fs_info->qgroup_lock);
        }
-       if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (btrfs_qgroup_enabled(fs_info))
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
        else
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -2937,6 +3028,47 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
        return ret;
 }
 
+static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
+                              u64 inode_rootid,
+                              struct btrfs_qgroup_inherit **inherit)
+{
+       int i = 0;
+       u64 num_qgroups = 0;
+       struct btrfs_qgroup *inode_qg;
+       struct btrfs_qgroup_list *qg_list;
+       struct btrfs_qgroup_inherit *res;
+       size_t struct_sz;
+       u64 *qgids;
+
+       if (*inherit)
+               return -EEXIST;
+
+       inode_qg = find_qgroup_rb(fs_info, inode_rootid);
+       if (!inode_qg)
+               return -ENOENT;
+
+       num_qgroups = list_count_nodes(&inode_qg->groups);
+
+       if (!num_qgroups)
+               return 0;
+
+       struct_sz = struct_size(res, qgroups, num_qgroups);
+       if (struct_sz == SIZE_MAX)
+               return -ERANGE;
+
+       res = kzalloc(struct_sz, GFP_NOFS);
+       if (!res)
+               return -ENOMEM;
+       res->num_qgroups = num_qgroups;
+       qgids = res->qgroups;
+
+       list_for_each_entry(qg_list, &inode_qg->groups, next_group)
+               qgids[i++] = qg_list->group->qgroupid;
+
+       *inherit = res;
+       return 0;
+}
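
qgroup_auto_inherit() builds an inherit structure on the fly so that, in simple mode, a snapshot created without an explicit inherit argument still joins the parent qgroups of the source inode's root. The allocation sizes the flexible qgroups[] array with struct_size(), which saturates to SIZE_MAX on overflow. A simplified userspace sketch of that pattern (the real struct btrfs_qgroup_inherit has additional fields not shown here):

```c
/* Illustration only: overflow-checked allocation of a flexible u64 array,
 * mirroring the struct_size() pattern used above. */
#include <stdint.h>
#include <stdlib.h>

struct inherit_sketch {
	uint64_t num_qgroups;
	uint64_t qgroups[];        /* flexible array of qgroup ids */
};

static struct inherit_sketch *alloc_inherit(uint64_t num_qgroups)
{
	size_t sz;
	struct inherit_sketch *res;

	/* struct_size() returns SIZE_MAX on overflow; check the same way here. */
	if (num_qgroups > (SIZE_MAX - sizeof(*res)) / sizeof(uint64_t))
		return NULL;
	sz = sizeof(*res) + num_qgroups * sizeof(uint64_t);

	res = calloc(1, sz);       /* zeroed, like kzalloc() */
	if (res)
		res->num_qgroups = num_qgroups;
	return res;
}
```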
+
 /*
  * Copy the accounting information between qgroups. This is necessary
  * when a snapshot or a subvolume is created. Throwing an error will
@@ -2944,7 +3076,8 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
  * when a readonly fs is a reasonable outcome.
  */
 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
-                        u64 objectid, struct btrfs_qgroup_inherit *inherit)
+                        u64 objectid, u64 inode_rootid,
+                        struct btrfs_qgroup_inherit *inherit)
 {
        int ret = 0;
        int i;
@@ -2956,6 +3089,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
        struct btrfs_qgroup *dstgroup;
        struct btrfs_qgroup *prealloc;
        struct btrfs_qgroup_list **qlist_prealloc = NULL;
+       bool free_inherit = false;
        bool need_rescan = false;
        u32 level_size = 0;
        u64 nums;
@@ -2983,7 +3117,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 
        if (!committing)
                mutex_lock(&fs_info->qgroup_ioctl_lock);
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_enabled(fs_info))
                goto out;
 
        quota_root = fs_info->quota_root;
@@ -2992,6 +3126,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
                goto out;
        }
 
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
+               ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
+               if (ret)
+                       goto out;
+               free_inherit = true;
+       }
+
        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
                nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
@@ -3069,7 +3210,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
                qgroup_dirty(fs_info, dstgroup);
        }
 
-       if (srcid) {
+       if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
                srcgroup = find_qgroup_rb(fs_info, srcid);
                if (!srcgroup)
                        goto unlock;
@@ -3175,6 +3316,8 @@ out:
                        kfree(qlist_prealloc[i]);
                kfree(qlist_prealloc);
        }
+       if (free_inherit)
+               kfree(inherit);
        kfree(prealloc);
        return ret;
 }
@@ -3332,6 +3475,9 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
        int slot;
        int ret;
 
+       if (!btrfs_qgroup_full_accounting(fs_info))
+               return 1;
+
        mutex_lock(&fs_info->qgroup_rescan_lock);
        extent_root = btrfs_extent_root(fs_info,
                                fs_info->qgroup_rescan_progress.objectid);
@@ -3412,10 +3558,15 @@ out:
 
 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
 {
-       return btrfs_fs_closing(fs_info) ||
-               test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
-               !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
-                         fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
+       if (btrfs_fs_closing(fs_info))
+               return true;
+       if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+               return true;
+       if (!btrfs_qgroup_enabled(fs_info))
+               return true;
+       if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
+               return true;
+       return false;
 }
 
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
@@ -3429,6 +3580,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        bool stopped = false;
        bool did_leaf_rescans = false;
 
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+               return;
+
        path = btrfs_alloc_path();
        if (!path)
                goto out;
@@ -3532,6 +3686,11 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 {
        int ret = 0;
 
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+               btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
+               return -EINVAL;
+       }
+
        if (!init_flags) {
                /* we're resuming qgroup rescan at mount time */
                if (!(fs_info->qgroup_flags &
@@ -3562,7 +3721,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup is not enabled");
                        ret = -EINVAL;
-               } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+               } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
                        /* Quota disable is in progress */
                        ret = -EBUSY;
                }
@@ -3821,7 +3980,7 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
        u64 to_reserve;
        int ret;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+       if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid) || len == 0)
                return 0;
 
@@ -3953,8 +4112,12 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
        int trace_op = QGROUP_RELEASE;
        int ret;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
-               return 0;
+       if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
+               extent_changeset_init(&changeset);
+               return clear_record_extent_bits(&inode->io_tree, start,
+                                               start + len - 1,
+                                               EXTENT_QGROUP_RESERVED, &changeset);
+       }
 
        /* In release case, we shouldn't have @reserved */
        WARN_ON(!free && reserved);
@@ -4064,7 +4227,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid) || num_bytes == 0)
                return 0;
 
@@ -4109,7 +4272,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;
 
@@ -4125,7 +4288,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;
 
@@ -4184,7 +4347,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+       if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;
        /* Same as btrfs_qgroup_free_meta_prealloc() */
@@ -4292,7 +4455,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
        int level = btrfs_header_level(subvol_parent) - 1;
        int ret = 0;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
 
        if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
@@ -4402,7 +4565,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
        int ret = 0;
        int i;
 
-       if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+       if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
        if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
                return 0;
@@ -4485,3 +4648,53 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
        }
        *root = RB_ROOT;
 }
+
+int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
+                             struct btrfs_squota_delta *delta)
+{
+       int ret;
+       struct btrfs_qgroup *qgroup;
+       struct btrfs_qgroup *qg;
+       LIST_HEAD(qgroup_list);
+       u64 root = delta->root;
+       u64 num_bytes = delta->num_bytes;
+       const int sign = (delta->is_inc ? 1 : -1);
+
+       if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
+               return 0;
+
+       if (!is_fstree(root))
+               return 0;
+
+       /* If the extent predates enabling quotas, don't count it. */
+       if (delta->generation < fs_info->qgroup_enable_gen)
+               return 0;
+
+       spin_lock(&fs_info->qgroup_lock);
+       qgroup = find_qgroup_rb(fs_info, root);
+       if (!qgroup) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       ret = 0;
+       qgroup_iterator_add(&qgroup_list, qgroup);
+       list_for_each_entry(qg, &qgroup_list, iterator) {
+               struct btrfs_qgroup_list *glist;
+
+               qg->excl += num_bytes * sign;
+               qg->rfer += num_bytes * sign;
+               qgroup_dirty(fs_info, qg);
+
+               list_for_each_entry(glist, &qg->groups, next_group)
+                       qgroup_iterator_add(&qgroup_list, glist->group);
+       }
+       qgroup_iterator_clean(&qgroup_list);
+
+out:
+       spin_unlock(&fs_info->qgroup_lock);
+       if (!ret && delta->rsv_bytes)
+               btrfs_qgroup_free_refroot(fs_info, root, delta->rsv_bytes,
+                                         BTRFS_QGROUP_RSV_DATA);
+       return ret;
+}
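
btrfs_record_squota_delta() is where simple quotas do their accounting: each extent creation or removal is charged once, at its full size, to the owning subvolume's qgroup and then propagated to every parent qgroup, while extents whose generation predates qgroup_enable_gen are skipped so data written before enabling is never counted. A userspace sketch of the propagation step, using a single parent pointer in place of the kernel's iterator over a parent list (illustration only):

```c
/* Illustration only: propagate an extent delta up a chain of parent qgroups,
 * as btrfs_record_squota_delta() does via qgroup_iterator_add(). */
#include <stdio.h>

struct qg {
	long long rfer;
	long long excl;
	struct qg *parent;    /* the kernel walks a list of parents; one is enough here */
};

static void record_delta(struct qg *owner, long long num_bytes, int is_inc)
{
	const int sign = is_inc ? 1 : -1;

	for (struct qg *qg = owner; qg; qg = qg->parent) {
		qg->rfer += num_bytes * sign;
		qg->excl += num_bytes * sign;
	}
}

int main(void)
{
	struct qg parent = { 0 };
	struct qg child = { .parent = &parent };

	record_delta(&child, 16384, 1);   /* a new 16K extent owned by the child */
	record_delta(&child, 4096, 0);    /* a 4K extent freed */
	printf("child rfer %lld, parent rfer %lld\n", child.rfer, parent.rfer);
	return 0;
}
```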