fs/btrfs/block-group.c
// SPDX-License-Identifier: GPL-2.0

#include "ctree.h"
#include "block-group.h"
#include "space-info.h"

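/*
 * Take an extra reference on a block group's refcount. Released with
 * btrfs_put_block_group().
 */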
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

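/*
 * Drop a reference on a block group. Once the last reference is gone, warn if
 * the group still has pinned or reserved bytes or held full stripe locks, then
 * free its free space control structure and the block group itself.
 */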
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);

                /*
                 * If not empty, someone is still holding the mutex of a
                 * full_stripe_lock, which can only be released by its holder.
                 * Freeing the block group here would then cause a
                 * use-after-free when that holder tries to release the full
                 * stripe lock.
                 *
                 * There is no better way to resolve this, so only warn.
                 */
                WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * Return the block group at or after bytenr if contains is 0, otherwise return
 * the block group that contains bytenr. The returned block group, if any, has
 * had its reference count incremented and must be released with
 * btrfs_put_block_group().
 */
static struct btrfs_block_group_cache *block_group_cache_tree_search(
                struct btrfs_fs_info *info, u64 bytenr, int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
                struct btrfs_fs_info *info, u64 bytenr)
{
        return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                struct btrfs_fs_info *info, u64 bytenr)
{
        return block_group_cache_tree_search(info, bytenr, 1);
}

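/*
 * Return the block group that comes right after @cache in the block group
 * cache tree, or NULL if @cache is the last one. The reference on @cache is
 * dropped and a reference is taken on the returned block group. If @cache was
 * removed from the tree, fall back to a full search starting at the end of
 * its range.
 */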
struct btrfs_block_group_cache *btrfs_next_block_group(
                struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct rb_node *node;

        spin_lock(&fs_info->block_group_cache_lock);

        /* If our block group was removed, we need a full search. */
        if (RB_EMPTY_NODE(&cache->cache_node)) {
                const u64 next_bytenr = cache->key.objectid + cache->key.offset;

                spin_unlock(&fs_info->block_group_cache_lock);
                btrfs_put_block_group(cache);
                cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
                return cache;
        }
        node = rb_next(&cache->cache_node);
        btrfs_put_block_group(cache);
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
                btrfs_get_block_group(cache);
        } else
                cache = NULL;
        spin_unlock(&fs_info->block_group_cache_lock);
        return cache;
}

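/*
 * Account a NOCOW writer in the block group containing @bytenr. Returns false
 * if no such block group exists or the block group is read only, true
 * otherwise. On success the block group reference taken by the lookup is kept
 * and dropped later by btrfs_dec_nocow_writers().
 */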
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_block_group_cache *bg;
        bool ret = true;

        bg = btrfs_lookup_block_group(fs_info, bytenr);
        if (!bg)
                return false;

        spin_lock(&bg->lock);
        if (bg->ro)
                ret = false;
        else
                atomic_inc(&bg->nocow_writers);
        spin_unlock(&bg->lock);

        /* No put on block group, done by btrfs_dec_nocow_writers */
        if (!ret)
                btrfs_put_block_group(bg);

        return ret;
}

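/*
 * Undo a matching btrfs_inc_nocow_writers() call for the block group
 * containing @bytenr, waking up btrfs_wait_nocow_writers() when the last
 * NOCOW writer is gone.
 */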
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_block_group_cache *bg;

        bg = btrfs_lookup_block_group(fs_info, bytenr);
        ASSERT(bg);
        if (atomic_dec_and_test(&bg->nocow_writers))
                wake_up_var(&bg->nocow_writers);
        /*
         * Once for our lookup and once for the lookup done by a previous call
         * to btrfs_inc_nocow_writers()
         */
        btrfs_put_block_group(bg);
        btrfs_put_block_group(bg);
}

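/* Wait until the block group has no more active NOCOW writers. */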
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
        wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

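/*
 * Release one reservation held against the block group containing @start and
 * wake up anyone waiting in btrfs_wait_block_group_reservations().
 */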
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
                                        const u64 start)
{
        struct btrfs_block_group_cache *bg;

        bg = btrfs_lookup_block_group(fs_info, start);
        ASSERT(bg);
        if (atomic_dec_and_test(&bg->reservations))
                wake_up_var(&bg->reservations);
        btrfs_put_block_group(bg);
}

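/*
 * Wait until all reservations against a read-only data block group have been
 * released; see the comment in the function body for why the groups_sem write
 * lock is taken and released first.
 */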
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
        struct btrfs_space_info *space_info = bg->space_info;

        ASSERT(bg->ro);

        if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
                return;

        /*
         * Our block group is read only but before we set it to read only,
         * some task might have allocated an extent from it already, but it
         * has not yet created a respective ordered extent (and added it to a
         * root's list of ordered extents).
         * Therefore wait for any task currently allocating extents, since the
         * block group's reservations counter is incremented while a read lock
         * on the groups' semaphore is held and decremented after releasing
         * the read access on that semaphore and creating the ordered extent.
         */
        down_write(&space_info->groups_sem);
        up_write(&space_info->groups_sem);

        wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}