/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard. This is due
 * to the two-pass nature of it, where extent discarding is prioritized over
 * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
 * find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
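
/*
 * Illustrative sketch (hypothetical caller-side variables trans, fs_info,
 * ret): a transaction-context caller that only wants a new chunk when one is
 * really needed could pass CHUNK_ALLOC_NO_FORCE to btrfs_chunk_alloc(),
 * declared later in this header:
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_metadata_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */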

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for the delayed data space allocation, because only the
	 * data space allocation and the related metadata update can cross a
	 * transaction.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;
	unsigned int to_copy:1;
	unsigned int relocating_repair:1;
	unsigned int chunk_item_inserted:1;
	unsigned int zone_is_active:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents can not be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;
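
	/*
	 * Illustrative sketch (hypothetical caller-side variable bg): a task
	 * that must keep using a block group that may get deleted brackets
	 * the access with the freeze helpers declared at the bottom of this
	 * header:
	 *
	 *	btrfs_freeze_block_group(bg);
	 *	(use the block group's range and device extents)
	 *	btrfs_unfreeze_block_group(bg);
	 */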

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;
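
	/*
	 * Illustrative sketch (hypothetical caller-side variables fs_info,
	 * start, bg): once the ordered extent for an allocation exists, the
	 * reservation is dropped via
	 *
	 *	btrfs_dec_block_group_reservations(fs_info, start);
	 *
	 * and relocation can wait for all outstanding ones with
	 * btrfs_wait_block_group_reservations(bg); both are declared below.
	 */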

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;
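
	/*
	 * Illustrative sketch (hypothetical caller-side variables fs_info,
	 * bytenr, bg): a nocow writer pairs the helpers declared below:
	 *
	 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
	 *	if (!bg)
	 *		(fall back to a COW write)
	 *	(submit the nocow write and create its ordered extent)
	 *	btrfs_dec_nocow_writers(bg);
	 */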

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Flag indicating this block group is placed on a sequential zone */
	bool seq_zone;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;
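
	/*
	 * Illustrative sketch (hypothetical caller-side variable bg):
	 * swapfile activation takes one reference per extent via
	 * btrfs_inc_block_group_swap_extents(bg), which fails if the group
	 * is read-only, and deactivation drops them with
	 * btrfs_dec_block_group_swap_extents(bg, count); both are declared
	 * below.
	 */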

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}
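
/*
 * Illustrative sketch (hypothetical variable bytenr): checking whether a
 * logical address falls inside a block group's range:
 *
 *	if (bytenr >= block_group->start &&
 *	    bytenr < btrfs_block_group_end(block_group))
 *		(bytenr belongs to this block group)
 */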

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}
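
/*
 * Illustrative sketch (hypothetical caller-side variables cache, ret): a
 * caller that needs the block group fully cached can combine the helpers
 * declared above, passing load_cache_only == 0 to request full caching:
 *
 *	if (!btrfs_block_group_done(cache)) {
 *		ret = btrfs_cache_block_group(cache, 0);
 *		if (!ret)
 *			ret = btrfs_wait_block_group_cache_done(cache);
 *	}
 */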

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

#endif /* BTRFS_BLOCK_GROUP_H */