// SPDX-License-Identifier: GPL-2.0

#include "ctree.h"
#include "block-group.h"
#include "space-info.h"

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If not empty, someone is still holding the mutex of
		 * full_stripe_lock, which can only be released by the caller.
		 * That would cause a use-after-free when the caller tries to
		 * release the full stripe lock.
		 *
		 * There is no better way to resolve this, so just warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * Return the block group at or after bytenr if contains is 0, otherwise
 * return the block group that contains the given bytenr.
 */
static struct btrfs_block_group_cache *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

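/*
 * Example usage (illustrative sketch, not part of this file): both lookup
 * helpers return the block group with an extra reference held, so every
 * successful lookup must be paired with a btrfs_put_block_group():
 *
 *	struct btrfs_block_group_cache *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (!bg)
 *		return -ENOENT;
 *	... use bg while the reference is held ...
 *	btrfs_put_block_group(bg);
 */
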
struct btrfs_block_group_cache *btrfs_next_block_group(
		struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

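/*
 * Example iteration (illustrative sketch, not part of this file):
 * btrfs_next_block_group() drops the reference on the group it was given
 * and returns the next group with a new reference, so a full walk can be
 * written as:
 *
 *	struct btrfs_block_group_cache *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... bg is referenced for the duration of the loop body ...
 *	}
 *
 * Note that breaking out of the loop early leaks a reference unless the
 * caller does an explicit btrfs_put_block_group(bg).
 */
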
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

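/*
 * Example pairing (illustrative sketch, not part of this file): a NOCOW
 * writer brackets its write with the inc/dec helpers. The block group
 * reference taken by the lookup in btrfs_inc_nocow_writers() is
 * intentionally kept and later dropped by btrfs_dec_nocow_writers():
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... write in place into the existing extent ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... block group is read only, fall back to COW ...
 *	}
 */
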
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers().
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

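/*
 * Example wait sequence (illustrative sketch; naming
 * btrfs_inc_block_group_ro() as the caller's helper is an assumption
 * about the call site): making the group read only first ensures
 * btrfs_inc_nocow_writers() fails for new writers, then the wait above
 * drains the writers already in flight:
 *
 *	ret = btrfs_inc_block_group_ro(bg);
 *	if (!ret)
 *		btrfs_wait_nocow_writers(bg);
 */
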
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
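
/*
 * Note on the empty lock/unlock pair above (editorial sketch, simplified
 * from the comment in the function): taking groups_sem for write acts as
 * a barrier because it cannot be acquired until every allocator that was
 * already holding it for read has finished. The allocator side looks
 * roughly like:
 *
 *	down_read(&space_info->groups_sem);
 *	atomic_inc(&bg->reservations);		(extent allocated from bg)
 *	up_read(&space_info->groups_sem);
 *	...
 *	(ordered extent created, then the reservation is dropped via
 *	 btrfs_dec_block_group_reservations())
 *
 * So after the write lock has been taken and released, every remaining
 * reservation is already counted and wait_var_event() only has to wait
 * for the counter to reach zero.
 */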