/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <uapi/linux/btrfs_tree.h>

struct btrfs_trans_handle;
struct btrfs_fs_info;

/* these are the possible values of struct btrfs_delayed_ref_node->action */
enum btrfs_delayed_ref_action {
	/* Add one backref to the tree */
	BTRFS_ADD_DELAYED_REF = 1,
	/* Delete one backref from the tree */
	BTRFS_DROP_DELAYED_REF,
	/* Record a full extent allocation */
	BTRFS_ADD_DELAYED_EXTENT,
	/* Not changing ref count on head ref */
	BTRFS_UPDATE_DELAYED_HEAD,
};

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	/*
	 * For insertion into struct btrfs_delayed_ref_root::href_root.
	 * Keep it in the same cache line as 'bytenr' for more efficient
	 * searches in the rbtree.
	 */
	struct rb_node href_node;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	refcount_t refs;

	/* Protects 'ref_tree' and 'ref_add_list'. */
	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
	struct list_head ref_add_list;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref, this is not adjusted as delayed refs are run,
	 * this is meant to track if we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding mod references for this bytenr.  This
	 * is used with lookup_extent_info to get an accurate reference count
	 * for a bytenr, so it is adjusted as delayed refs are run so that any
	 * on disk reference count + ref_mod is accurate.
	 */
	int ref_mod;

	/*
	 * The root that triggered the allocation when must_insert_reserved is
	 * set to true.
	 */
	u64 owning_root;

	/*
	 * Track reserved bytes when setting must_insert_reserved.  On success
	 * or cleanup, we will need to free the reservation.
	 */
	u64 reserved_bytes;

	/*
	 * when a new extent is allocated, it is just reserved in memory
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	bool must_insert_reserved;
	bool is_data;
	bool is_system;
	bool processing;
};

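/*
 * Worked example (editor's illustration, not from the original header): if one
 * transaction queues three modifications for the same data extent, +1, +1 and
 * -1, the head ends up with total_ref_mod == 1 and ref_mod == 1 before any of
 * them run.  As the refs are processed, ref_mod is adjusted back towards zero
 * so that "on disk reference count + ref_mod" stays accurate, while
 * total_ref_mod keeps the final sum so the csum accounting decision made at
 * queue time still holds.
 */
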
struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * To make qgroup to skip given root.
	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
	 * modify counters for snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get calculated twice
	 */
	u64 qgroup_to_skip;
};

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Root which owns this data reference. */
	u64 ref_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};

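/*
 * Worked example (editor's illustration, numbers made up): an EXTENT_DATA item
 * whose key.offset is 1M (file_offset) and whose btrfs_file_extent_offset() is
 * 64K (extent_offset) stores offset = 1M - 64K in its backref, i.e. the file
 * position the extent would start at if it were referenced from its beginning.
 */
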
struct btrfs_tree_ref {
	/*
	 * Level of this tree block
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/*
	 * Root which owns this tree block reference.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
	 */
	u64 ref_root;

	/* For non-skinny metadata, no special member needed */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	enum btrfs_delayed_ref_action action;

	/*
	 * Whether this extent should go through qgroup record.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* Through which root is this modification. */
	u64 real_root;
#endif
	u64 bytenr;
	u64 len;
	u64 owning_root;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}

static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
						    int num_csum_items)
{
	/*
	 * Deleting csum items does not result in new nodes/leaves and does not
	 * require changing the free space tree, only the csum tree, so this is
	 * all we need.
	 */
	return btrfs_calc_metadata_size(fs_info, num_csum_items);
}

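/*
 * Rough worked example (editor's illustration), assuming the usual helpers
 * where btrfs_calc_insert_metadata_size() is nodesize * BTRFS_MAX_LEVEL * 2 *
 * num_items and btrfs_calc_metadata_size() drops the factor of 2: with a 16K
 * nodesize, one delayed ref reserves 16K * 8 * 2 = 256K (512K when the free
 * space tree is enabled), while one csum item deletion reserves 16K * 8 = 128K.
 */
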
void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, int action, u64 bytenr,
			    u64 len, u64 parent, u64 owning_root);
void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
			 u64 mod_root, bool skip_qgroup);
void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ref_root, u64 ino,
			 u64 offset, u64 mod_root, bool skip_qgroup);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref);

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);

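/*
 * Illustrative sketch only: btrfs_example_ref_cowed_block() is a hypothetical
 * helper added by the editor, not part of the btrfs API.  It shows the calling
 * pattern the prototypes above imply: fill a stack struct btrfs_ref with the
 * generic part (action, bytenr, length, owning root), add the tree-specific
 * part, then queue the delayed ref.  parent is 0 here, i.e. a non-shared ref.
 */
static inline int btrfs_example_ref_cowed_block(struct btrfs_trans_handle *trans,
						u64 bytenr, u64 nodesize,
						u64 root, int level)
{
	struct btrfs_ref ref = { 0 };

	/* Queue a +1 reference for a tree block owned by 'root'. */
	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr, nodesize,
			       0, root);
	btrfs_init_tree_ref(&ref, level, root, root, false);
	return btrfs_add_delayed_tree_ref(trans, &ref, NULL);
}
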
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);

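/*
 * Illustrative sketch only: btrfs_example_set_extent_flags() is a hypothetical
 * helper added by the editor, not part of the btrfs API.  It shows the usual
 * lifetime of a btrfs_delayed_extent_op: allocate it, describe the update,
 * queue it, and free it ourselves only if queueing failed, since on success
 * ownership moves to the delayed ref code.
 */
static inline int btrfs_example_set_extent_flags(struct btrfs_trans_handle *trans,
						 u64 bytenr, u64 num_bytes,
						 u64 flags, int level)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	/* Only the extent flags are updated, the key is left alone. */
	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}
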
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif