1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/sched/mm.h>
7 #include <linux/atomic.h>
8 #include <linux/vmalloc.h>
12 #include "rcu-string.h"
14 #include "block-group.h"
15 #include "dev-replace.h"
16 #include "space-info.h"
18 #include "accessors.h"
21 /* Maximum number of zones to report per blkdev_report_zones() call */
22 #define BTRFS_REPORT_NR_ZONES 4096
23 /* Invalid allocation pointer value for missing devices */
24 #define WP_MISSING_DEV ((u64)-1)
25 /* Pseudo write pointer value for conventional zone */
26 #define WP_CONVENTIONAL ((u64)-2)
29 * Location of the first zone of superblock logging zone pairs.
31 * - primary superblock: 0B (zone 0)
32 * - first copy: 512G (zone starting at that offset)
33 * - second copy: 4T (zone starting at that offset)
35 #define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
36 #define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
37 #define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
39 #define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
40 #define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
42 /* Number of superblock log zones */
43 #define BTRFS_NR_SB_LOG_ZONES 2
46 * Minimum number of active zones we need:
48 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
49 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
50 * - 1 zone for tree-log dedicated block group
51 * - 1 zone for relocation
53 #define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
56 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
57 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
58 * We do not expect the zone size to become larger than 8GiB or smaller than
59 * 4MiB in the near future.
61 #define BTRFS_MAX_ZONE_SIZE SZ_8G
62 #define BTRFS_MIN_ZONE_SIZE SZ_4M
64 #define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
66 static void wait_eb_writebacks(struct btrfs_block_group *block_group);
67 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
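/*
 * A superblock log zone counts as full once it is marked FULL or no longer
 * has room for another BTRFS_SUPER_INFO_SIZE write before its capacity.
 */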
69 static inline bool sb_zone_is_full(const struct blk_zone *zone)
71 return (zone->cond == BLK_ZONE_COND_FULL) ||
72 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
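/* Callback for blkdev_report_zones(): copy each reported zone into the caller's array. */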
75 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
77 struct blk_zone *zones = data;
79 memcpy(&zones[idx], zone, sizeof(*zone));
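/*
 * Determine the position of the next superblock write from the state of the
 * two superblock log zones. Returns 0 and stores the byte offset in @wp_ret,
 * or -ENOENT when both zones are empty and no superblock has been written yet.
 */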
84 static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
87 bool empty[BTRFS_NR_SB_LOG_ZONES];
88 bool full[BTRFS_NR_SB_LOG_ZONES];
92 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
93 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
94 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
95 full[i] = sb_zone_is_full(&zones[i]);
99 * Possible states of log buffer zones
101 *           Empty[0]  In use[0]  Full[0]
102 * Empty[1]         *          0        1
103 * In use[1]        x          x        1
104 * Full[1]          0          0        C
105 *
106 * Log position:
107 * *: Special case, no superblock is written
108 * 0: Use write pointer of zones[0]
109 * 1: Use write pointer of zones[1]
110 * C: Compare super blocks from zones[0] and zones[1], use the latest
111 * one determined by generation
112 * x: Invalid state
115 if (empty[0] && empty[1]) {
116 /* Special case to distinguish no superblock to read */
117 *wp_ret = zones[0].start << SECTOR_SHIFT;
119 } else if (full[0] && full[1]) {
120 /* Compare two super blocks */
121 struct address_space *mapping = bdev->bd_inode->i_mapping;
122 struct page *page[BTRFS_NR_SB_LOG_ZONES];
123 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
126 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
127 u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
128 u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
129 BTRFS_SUPER_INFO_SIZE;
131 page[i] = read_cache_page_gfp(mapping,
132 bytenr >> PAGE_SHIFT, GFP_NOFS);
133 if (IS_ERR(page[i])) {
135 btrfs_release_disk_super(super[0]);
136 return PTR_ERR(page[i]);
138 super[i] = page_address(page[i]);
141 if (btrfs_super_generation(super[0]) >
142 btrfs_super_generation(super[1]))
143 sector = zones[1].start;
145 sector = zones[0].start;
147 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
148 btrfs_release_disk_super(super[i]);
149 } else if (!full[0] && (empty[1] || full[1])) {
150 sector = zones[0].wp;
151 } else if (full[0]) {
152 sector = zones[1].wp;
156 *wp_ret = sector << SECTOR_SHIFT;
161 * Get the first zone number of the superblock mirror
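 *
 * For example, with a 256MiB zone size (shift == 28), mirror 1 (512GiB offset)
 * maps to zone 2048 and mirror 2 (4TiB offset) maps to zone 16384.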
163 static inline u32 sb_zone_number(int shift, int mirror)
167 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
169 case 0: zone = 0; break;
170 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
171 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
174 ASSERT(zone <= U32_MAX);
179 static inline sector_t zone_start_sector(u32 zone_number,
180 struct block_device *bdev)
182 return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
185 static inline u64 zone_start_physical(u32 zone_number,
186 struct btrfs_zoned_device_info *zone_info)
188 return (u64)zone_number << zone_info->zone_size_shift;
192 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
193 * device into static sized chunks and fakes a conventional zone on each of them.
196 static int emulate_report_zones(struct btrfs_device *device, u64 pos,
197 struct blk_zone *zones, unsigned int nr_zones)
199 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
200 sector_t bdev_size = bdev_nr_sectors(device->bdev);
203 pos >>= SECTOR_SHIFT;
204 for (i = 0; i < nr_zones; i++) {
205 zones[i].start = i * zone_sectors + pos;
206 zones[i].len = zone_sectors;
207 zones[i].capacity = zone_sectors;
208 zones[i].wp = zones[i].start + zone_sectors;
209 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
210 zones[i].cond = BLK_ZONE_COND_NOT_WP;
212 if (zones[i].wp >= bdev_size) {
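/*
 * Report zones starting at @pos, filling at most *nr_zones entries. The
 * result comes from the per-device zone cache when all requested zones are
 * cached, otherwise from blkdev_report_zones() (or from the emulation above
 * for a non-zoned device). *nr_zones is updated to the number of zones
 * actually reported.
 */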
221 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
222 struct blk_zone *zones, unsigned int *nr_zones)
224 struct btrfs_zoned_device_info *zinfo = device->zone_info;
230 if (!bdev_is_zoned(device->bdev)) {
231 ret = emulate_report_zones(device, pos, zones, *nr_zones);
237 if (zinfo->zone_cache) {
241 ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
242 zno = pos >> zinfo->zone_size_shift;
244 * We cannot report zones beyond the zone end. So, it is OK to
245 * cap *nr_zones at the zone end.
247 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
249 for (i = 0; i < *nr_zones; i++) {
250 struct blk_zone *zone_info;
252 zone_info = &zinfo->zone_cache[zno + i];
257 if (i == *nr_zones) {
258 /* Cache hit on all the zones */
259 memcpy(zones, zinfo->zone_cache + zno,
260 sizeof(*zinfo->zone_cache) * *nr_zones);
265 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
266 copy_zone_info_cb, zones);
268 btrfs_err_in_rcu(device->fs_info,
269 "zoned: failed to read zone %llu on %s (devid %llu)",
270 pos, rcu_str_deref(device->name),
279 if (zinfo->zone_cache) {
280 u32 zno = pos >> zinfo->zone_size_shift;
282 memcpy(zinfo->zone_cache + zno, zones,
283 sizeof(*zinfo->zone_cache) * *nr_zones);
289 /* The emulated zone size is determined from the size of device extent */
290 static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
292 struct btrfs_path *path;
293 struct btrfs_root *root = fs_info->dev_root;
294 struct btrfs_key key;
295 struct extent_buffer *leaf;
296 struct btrfs_dev_extent *dext;
300 key.type = BTRFS_DEV_EXTENT_KEY;
303 path = btrfs_alloc_path();
307 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
311 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
312 ret = btrfs_next_leaf(root, path);
315 /* No dev extents at all? Not good */
322 leaf = path->nodes[0];
323 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
324 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
328 btrfs_free_path(path);
333 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
335 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
336 struct btrfs_device *device;
339 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
340 if (!btrfs_fs_incompat(fs_info, ZONED))
343 mutex_lock(&fs_devices->device_list_mutex);
344 list_for_each_entry(device, &fs_devices->devices, dev_list) {
345 /* We can skip reading of zone info for missing devices */
349 ret = btrfs_get_dev_zone_info(device, true);
353 mutex_unlock(&fs_devices->device_list_mutex);
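/*
 * Populate the per-device zone information: zone size and count, the
 * sequential/empty/active zone bitmaps, the optional zone cache and the
 * superblock log zones. Also validates the zone size and the number of max
 * active zones against the filesystem's minimum requirements.
 */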
358 int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
360 struct btrfs_fs_info *fs_info = device->fs_info;
361 struct btrfs_zoned_device_info *zone_info = NULL;
362 struct block_device *bdev = device->bdev;
363 unsigned int max_active_zones;
364 unsigned int nactive;
367 struct blk_zone *zones = NULL;
368 unsigned int i, nreported = 0, nr_zones;
369 sector_t zone_sectors;
370 char *model, *emulated;
374 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not be set yet.
377 if (!btrfs_fs_incompat(fs_info, ZONED))
380 if (device->zone_info)
383 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
387 device->zone_info = zone_info;
389 if (!bdev_is_zoned(bdev)) {
390 if (!fs_info->zone_size) {
391 ret = calculate_emulated_zone_size(fs_info);
396 ASSERT(fs_info->zone_size);
397 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
399 zone_sectors = bdev_zone_sectors(bdev);
402 ASSERT(is_power_of_two_u64(zone_sectors));
403 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
405 /* We reject devices with a zone size larger than 8GB */
406 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
407 btrfs_err_in_rcu(fs_info,
408 "zoned: %s: zone size %llu larger than supported maximum %llu",
409 rcu_str_deref(device->name),
410 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
413 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
414 btrfs_err_in_rcu(fs_info,
415 "zoned: %s: zone size %llu smaller than supported minimum %u",
416 rcu_str_deref(device->name),
417 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
422 nr_sectors = bdev_nr_sectors(bdev);
423 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
424 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
425 if (!IS_ALIGNED(nr_sectors, zone_sectors))
426 zone_info->nr_zones++;
428 max_active_zones = bdev_max_active_zones(bdev);
429 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
430 btrfs_err_in_rcu(fs_info,
431 "zoned: %s: max active zones %u is too small, need at least %u active zones",
432 rcu_str_deref(device->name), max_active_zones,
433 BTRFS_MIN_ACTIVE_ZONES);
437 zone_info->max_active_zones = max_active_zones;
439 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
440 if (!zone_info->seq_zones) {
445 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
446 if (!zone_info->empty_zones) {
451 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
452 if (!zone_info->active_zones) {
457 zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
464 * Enable zone cache only for a zoned device. On a non-zoned device, we
465 * fill the zone info with emulated CONVENTIONAL zones, so no need to use the cache.
468 if (populate_cache && bdev_is_zoned(device->bdev)) {
469 zone_info->zone_cache = vcalloc(zone_info->nr_zones,
470 sizeof(struct blk_zone));
471 if (!zone_info->zone_cache) {
472 btrfs_err_in_rcu(device->fs_info,
473 "zoned: failed to allocate zone cache for %s",
474 rcu_str_deref(device->name));
482 while (sector < nr_sectors) {
483 nr_zones = BTRFS_REPORT_NR_ZONES;
484 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
489 for (i = 0; i < nr_zones; i++) {
490 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
491 __set_bit(nreported, zone_info->seq_zones);
492 switch (zones[i].cond) {
493 case BLK_ZONE_COND_EMPTY:
494 __set_bit(nreported, zone_info->empty_zones);
496 case BLK_ZONE_COND_IMP_OPEN:
497 case BLK_ZONE_COND_EXP_OPEN:
498 case BLK_ZONE_COND_CLOSED:
499 __set_bit(nreported, zone_info->active_zones);
505 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
508 if (nreported != zone_info->nr_zones) {
509 btrfs_err_in_rcu(device->fs_info,
510 "inconsistent number of zones on %s (%u/%u)",
511 rcu_str_deref(device->name), nreported,
512 zone_info->nr_zones);
517 if (max_active_zones) {
518 if (nactive > max_active_zones) {
519 btrfs_err_in_rcu(device->fs_info,
520 "zoned: %u active zones on %s exceeds max_active_zones %u",
521 nactive, rcu_str_deref(device->name),
526 atomic_set(&zone_info->active_zones_left,
527 max_active_zones - nactive);
528 set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
531 /* Validate superblock log */
532 nr_zones = BTRFS_NR_SB_LOG_ZONES;
533 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
536 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
538 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
539 if (sb_zone + 1 >= zone_info->nr_zones)
542 ret = btrfs_get_dev_zones(device,
543 zone_start_physical(sb_zone, zone_info),
544 &zone_info->sb_zones[sb_pos],
549 if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
550 btrfs_err_in_rcu(device->fs_info,
551 "zoned: failed to read super block log zone info at devid %llu zone %u",
552 device->devid, sb_zone);
558 * If zones[0] is conventional, always use the beginning of the
559 * zone to record superblock. No need to validate in that case.
561 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
562 BLK_ZONE_TYPE_CONVENTIONAL)
565 ret = sb_write_pointer(device->bdev,
566 &zone_info->sb_zones[sb_pos], &sb_wp);
567 if (ret != -ENOENT && ret) {
568 btrfs_err_in_rcu(device->fs_info,
569 "zoned: super block log zone corrupted devid %llu zone %u",
570 device->devid, sb_zone);
579 if (bdev_is_zoned(bdev)) {
580 model = "host-managed zoned";
584 emulated = "emulated ";
587 btrfs_info_in_rcu(fs_info,
588 "%s block device %s, %u %szones of %llu bytes",
589 model, rcu_str_deref(device->name), zone_info->nr_zones,
590 emulated, zone_info->zone_size);
596 btrfs_destroy_dev_zone_info(device);
600 void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
602 struct btrfs_zoned_device_info *zone_info = device->zone_info;
607 bitmap_free(zone_info->active_zones);
608 bitmap_free(zone_info->seq_zones);
609 bitmap_free(zone_info->empty_zones);
610 vfree(zone_info->zone_cache);
612 device->zone_info = NULL;
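/*
 * Clone the zone info of @orig_dev (e.g. for a device-replace target). The
 * zone bitmaps are copied, while the zone cache is intentionally left empty.
 */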
615 struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
617 struct btrfs_zoned_device_info *zone_info;
619 zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
623 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
624 if (!zone_info->seq_zones)
627 bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
628 zone_info->nr_zones);
630 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
631 if (!zone_info->empty_zones)
634 bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
635 zone_info->nr_zones);
637 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
638 if (!zone_info->active_zones)
641 bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
642 zone_info->nr_zones);
643 zone_info->zone_cache = NULL;
648 bitmap_free(zone_info->seq_zones);
649 bitmap_free(zone_info->empty_zones);
650 bitmap_free(zone_info->active_zones);
655 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
656 struct blk_zone *zone)
658 unsigned int nr_zones = 1;
661 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
662 if (ret != 0 || !nr_zones)
663 return ret ? ret : -EIO;
668 static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
670 struct btrfs_device *device;
672 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
673 if (device->bdev && bdev_is_zoned(device->bdev)) {
675 "zoned: mode not enabled but zoned device found: %pg",
684 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
686 struct queue_limits *lim = &fs_info->limits;
687 struct btrfs_device *device;
692 * Host-Managed devices can't be used without the ZONED flag. With the
693 * ZONED flag, all devices can be used, using zone emulation if required.
695 if (!btrfs_fs_incompat(fs_info, ZONED))
696 return btrfs_check_for_zoned_device(fs_info);
698 blk_set_stacking_limits(lim);
700 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
701 struct btrfs_zoned_device_info *zone_info = device->zone_info;
707 zone_size = zone_info->zone_size;
708 } else if (zone_info->zone_size != zone_size) {
710 "zoned: unequal block device zone sizes: have %llu found %llu",
711 zone_info->zone_size, zone_size);
716 * With the zoned emulation, we can have a non-zoned device in
717 * zoned mode. In this case, we don't have a valid max zone append size.
720 if (bdev_is_zoned(device->bdev)) {
721 blk_stack_limits(lim,
722 &bdev_get_queue(device->bdev)->limits,
728 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
729 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
730 * check the alignment here.
732 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
734 "zoned: zone size %llu not aligned to stripe %u",
735 zone_size, BTRFS_STRIPE_LEN);
739 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
740 btrfs_err(fs_info, "zoned: mixed block groups not supported");
744 fs_info->zone_size = zone_size;
746 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
747 * Technically, we can have multiple pages per segment. But, since
748 * we add the pages one by one to a bio, and cannot increase the
749 * metadata reservation even if it increases the number of extents, it
750 * is safe to stick with the limit.
752 fs_info->max_zone_append_size = ALIGN_DOWN(
753 min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
754 (u64)lim->max_sectors << SECTOR_SHIFT,
755 (u64)lim->max_segments << PAGE_SHIFT),
756 fs_info->sectorsize);
757 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
758 if (fs_info->max_zone_append_size < fs_info->max_extent_size)
759 fs_info->max_extent_size = fs_info->max_zone_append_size;
762 * Check mount options here, because we might change fs_info->zoned
763 * from fs_info->zone_size.
765 ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
769 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
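/*
 * Reject or adjust mount options that are incompatible with zoned mode:
 * space cache v1 and NODATACOW are not supported, and async discard is
 * disabled with a notice.
 */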
773 int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt)
775 if (!btrfs_is_zoned(info))
779 * Space cache writing is not COWed. Disable that to avoid write errors
780 * in sequential zones.
782 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
783 btrfs_err(info, "zoned: space cache v1 is not supported");
787 if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
788 btrfs_err(info, "zoned: NODATACOW not supported");
792 if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
794 "zoned: async discard ignored and disabled for zoned mode");
795 btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
801 static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
802 int rw, u64 *bytenr_ret)
807 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
808 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
812 ret = sb_write_pointer(bdev, zones, &wp);
813 if (ret != -ENOENT && ret < 0)
817 struct blk_zone *reset = NULL;
819 if (wp == zones[0].start << SECTOR_SHIFT)
821 else if (wp == zones[1].start << SECTOR_SHIFT)
824 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
825 unsigned int nofs_flags;
827 ASSERT(sb_zone_is_full(reset));
829 nofs_flags = memalloc_nofs_save();
830 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
831 reset->start, reset->len);
832 memalloc_nofs_restore(nofs_flags);
836 reset->cond = BLK_ZONE_COND_EMPTY;
837 reset->wp = reset->start;
839 } else if (ret != -ENOENT) {
841 * For READ, we want the previous one. Move write pointer to
842 * the end of a zone, if it is at the head of a zone.
846 if (wp == zones[0].start << SECTOR_SHIFT)
847 zone_end = zones[1].start + zones[1].capacity;
848 else if (wp == zones[1].start << SECTOR_SHIFT)
849 zone_end = zones[0].start + zones[0].capacity;
851 wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
852 BTRFS_SUPER_INFO_SIZE);
854 wp -= BTRFS_SUPER_INFO_SIZE;
862 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
865 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
866 sector_t zone_sectors;
869 u8 zone_sectors_shift;
873 if (!bdev_is_zoned(bdev)) {
874 *bytenr_ret = btrfs_sb_offset(mirror);
878 ASSERT(rw == READ || rw == WRITE);
880 zone_sectors = bdev_zone_sectors(bdev);
881 if (!is_power_of_2(zone_sectors))
883 zone_sectors_shift = ilog2(zone_sectors);
884 nr_sectors = bdev_nr_sectors(bdev);
885 nr_zones = nr_sectors >> zone_sectors_shift;
887 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
888 if (sb_zone + 1 >= nr_zones)
891 ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
892 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
896 if (ret != BTRFS_NR_SB_LOG_ZONES)
899 return sb_log_location(bdev, zones, rw, bytenr_ret);
902 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
905 struct btrfs_zoned_device_info *zinfo = device->zone_info;
909 * For a zoned filesystem on a non-zoned block device, use the same
910 * super block locations as regular filesystem. Doing so, the super
911 * block can always be retrieved and the zoned flag of the volume
912 * detected from the super block information.
914 if (!bdev_is_zoned(device->bdev)) {
915 *bytenr_ret = btrfs_sb_offset(mirror);
919 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
920 if (zone_num + 1 >= zinfo->nr_zones)
923 return sb_log_location(device->bdev,
924 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
928 static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
936 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
937 if (zone_num + 1 >= zinfo->nr_zones)
940 if (!test_bit(zone_num, zinfo->seq_zones))
946 int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
948 struct btrfs_zoned_device_info *zinfo = device->zone_info;
949 struct blk_zone *zone;
952 if (!is_sb_log_zone(zinfo, mirror))
955 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
956 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
957 /* Advance to the next zone */
958 if (zone->cond == BLK_ZONE_COND_FULL) {
963 if (zone->cond == BLK_ZONE_COND_EMPTY)
964 zone->cond = BLK_ZONE_COND_IMP_OPEN;
966 zone->wp += SUPER_INFO_SECTORS;
968 if (sb_zone_is_full(zone)) {
970 * No room left to write a new superblock. Since the
971 * superblock is written with REQ_SYNC, it is safe to
972 * finish the zone now.
974 * If the write pointer is exactly at the capacity,
975 * explicit ZONE_FINISH is not necessary.
977 if (zone->wp != zone->start + zone->capacity) {
978 unsigned int nofs_flags;
981 nofs_flags = memalloc_nofs_save();
982 ret = blkdev_zone_mgmt(device->bdev,
983 REQ_OP_ZONE_FINISH, zone->start,
985 memalloc_nofs_restore(nofs_flags);
990 zone->wp = zone->start + zone->len;
991 zone->cond = BLK_ZONE_COND_FULL;
996 /* All the zones are FULL. Should not reach here. */
1001 int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
1003 unsigned int nofs_flags;
1004 sector_t zone_sectors;
1005 sector_t nr_sectors;
1006 u8 zone_sectors_shift;
1011 zone_sectors = bdev_zone_sectors(bdev);
1012 zone_sectors_shift = ilog2(zone_sectors);
1013 nr_sectors = bdev_nr_sectors(bdev);
1014 nr_zones = nr_sectors >> zone_sectors_shift;
1016 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1017 if (sb_zone + 1 >= nr_zones)
1020 nofs_flags = memalloc_nofs_save();
1021 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1022 zone_start_sector(sb_zone, bdev),
1023 zone_sectors * BTRFS_NR_SB_LOG_ZONES);
1024 memalloc_nofs_restore(nofs_flags);
1029 * Find allocatable zones within a given region.
1031 * @device: the device to allocate a region on
1032 * @hole_start: the start of the hole in which to allocate the region
1033 * @num_bytes: size of wanted region
1034 * @hole_end: the end of the hole
1035 * @return: position of allocatable zones
1037 * Allocatable region should not contain any superblock locations.
1039 u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1040 u64 hole_end, u64 num_bytes)
1042 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1043 const u8 shift = zinfo->zone_size_shift;
1044 u64 nzones = num_bytes >> shift;
1045 u64 pos = hole_start;
1050 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
1051 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
1053 while (pos < hole_end) {
1054 begin = pos >> shift;
1055 end = begin + nzones;
1057 if (end > zinfo->nr_zones)
1060 /* Check if zones in the region are all empty */
1061 if (btrfs_dev_is_sequential(device, pos) &&
1062 !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1063 pos += zinfo->zone_size;
1068 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1072 sb_zone = sb_zone_number(shift, i);
1073 if (!(end <= sb_zone ||
1074 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1076 pos = zone_start_physical(
1077 sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1081 /* We also need to exclude regular superblock positions */
1082 sb_pos = btrfs_sb_offset(i);
1083 if (!(pos + num_bytes <= sb_pos ||
1084 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1086 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1098 static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1100 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1101 unsigned int zno = (pos >> zone_info->zone_size_shift);
1103 /* We can use any number of zones */
1104 if (zone_info->max_active_zones == 0)
1107 if (!test_bit(zno, zone_info->active_zones)) {
1108 /* Active zone left? */
1109 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1111 if (test_and_set_bit(zno, zone_info->active_zones)) {
1112 /* Someone already set the bit */
1113 atomic_inc(&zone_info->active_zones_left);
1120 static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1122 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1123 unsigned int zno = (pos >> zone_info->zone_size_shift);
1125 /* We can use any number of zones */
1126 if (zone_info->max_active_zones == 0)
1129 if (test_and_clear_bit(zno, zone_info->active_zones))
1130 atomic_inc(&zone_info->active_zones_left);
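/*
 * Reset all device zones in [@physical, @physical + @length), marking them
 * empty and inactive again. The number of bytes reset is returned in @bytes.
 */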
1133 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1134 u64 length, u64 *bytes)
1136 unsigned int nofs_flags;
1140 nofs_flags = memalloc_nofs_save();
1141 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1142 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
1143 memalloc_nofs_restore(nofs_flags);
1149 btrfs_dev_set_zone_empty(device, physical);
1150 btrfs_dev_clear_active_zone(device, physical);
1151 physical += device->zone_info->zone_size;
1152 length -= device->zone_info->zone_size;
1158 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1160 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1161 const u8 shift = zinfo->zone_size_shift;
1162 unsigned long begin = start >> shift;
1163 unsigned long nbits = size >> shift;
1167 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1168 ASSERT(IS_ALIGNED(size, zinfo->zone_size));
1170 if (begin + nbits > zinfo->nr_zones)
1173 /* All the zones are conventional */
1174 if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1177 /* All the zones are sequential and empty */
1178 if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
1179 bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1182 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1185 if (!btrfs_dev_is_sequential(device, pos) ||
1186 btrfs_dev_is_empty_zone(device, pos))
1189 /* Free regions should be empty */
1192 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
1193 rcu_str_deref(device->name), device->devid, pos >> shift);
1196 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1206 * Calculate an allocation pointer from the extent allocation information
1207 * for a block group consisting of conventional zones. It points to the
1208 * end of the highest addressed extent in the block group as the allocation
1211 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1212 u64 *offset_ret, bool new)
1214 struct btrfs_fs_info *fs_info = cache->fs_info;
1215 struct btrfs_root *root;
1216 struct btrfs_path *path;
1217 struct btrfs_key key;
1218 struct btrfs_key found_key;
1223 * Avoid tree lookups for a new block group, there's no use for it.
1224 * It must always be 0.
1226 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1227 * For a new block group, this function is called from
1228 * btrfs_make_block_group() which is already taking the chunk mutex.
1229 * Thus, we cannot call calculate_alloc_pointer() which takes extent
1230 * buffer locks to avoid deadlock.
1237 path = btrfs_alloc_path();
1241 key.objectid = cache->start + cache->length;
1245 root = btrfs_extent_root(fs_info, key.objectid);
1246 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1247 /* We should not find the exact match */
1253 ret = btrfs_previous_extent_item(root, path, cache->start);
1262 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1264 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1265 length = found_key.offset;
1267 length = fs_info->nodesize;
1269 if (!(found_key.objectid >= cache->start &&
1270 found_key.objectid + length <= cache->start + cache->length)) {
1274 *offset_ret = found_key.objectid + length - cache->start;
1278 btrfs_free_path(path);
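/*
 * Load the zone information of a single stripe of a block group: its
 * physical location, zone capacity, write pointer based allocation offset,
 * and whether the backing device zone is active.
 */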
1288 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
1289 struct zone_info *info, unsigned long *active,
1290 struct btrfs_chunk_map *map)
1292 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1293 struct btrfs_device *device = map->stripes[zone_idx].dev;
1294 int dev_replace_is_ongoing = 0;
1295 unsigned int nofs_flag;
1296 struct blk_zone zone;
1299 info->physical = map->stripes[zone_idx].physical;
1301 if (!device->bdev) {
1302 info->alloc_offset = WP_MISSING_DEV;
1306 /* Consider a zone as active if we can allow any number of active zones. */
1307 if (!device->zone_info->max_active_zones)
1308 __set_bit(zone_idx, active);
1310 if (!btrfs_dev_is_sequential(device, info->physical)) {
1311 info->alloc_offset = WP_CONVENTIONAL;
1315 /* This zone will be used for allocation, so mark this zone non-empty. */
1316 btrfs_dev_clear_zone_empty(device, info->physical);
1318 down_read(&dev_replace->rwsem);
1319 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1320 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1321 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
1322 up_read(&dev_replace->rwsem);
1325 * The group is mapped to a sequential zone. Get the zone write pointer
1326 * to determine the allocation offset within the zone.
1328 WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
1329 nofs_flag = memalloc_nofs_save();
1330 ret = btrfs_get_dev_zone(device, info->physical, &zone);
1331 memalloc_nofs_restore(nofs_flag);
1333 if (ret != -EIO && ret != -EOPNOTSUPP)
1335 info->alloc_offset = WP_MISSING_DEV;
1339 if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
1340 btrfs_err_in_rcu(fs_info,
1341 "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1342 zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
1347 info->capacity = (zone.capacity << SECTOR_SHIFT);
1349 switch (zone.cond) {
1350 case BLK_ZONE_COND_OFFLINE:
1351 case BLK_ZONE_COND_READONLY:
1353 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1354 (info->physical >> device->zone_info->zone_size_shift),
1355 rcu_str_deref(device->name), device->devid);
1356 info->alloc_offset = WP_MISSING_DEV;
1358 case BLK_ZONE_COND_EMPTY:
1359 info->alloc_offset = 0;
1361 case BLK_ZONE_COND_FULL:
1362 info->alloc_offset = info->capacity;
1365 /* Partially used zone. */
1366 info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
1367 __set_bit(zone_idx, active);
1374 static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
1375 struct zone_info *info,
1376 unsigned long *active)
1378 if (info->alloc_offset == WP_MISSING_DEV) {
1379 btrfs_err(bg->fs_info,
1380 "zoned: cannot recover write pointer for zone %llu",
1385 bg->alloc_offset = info->alloc_offset;
1386 bg->zone_capacity = info->capacity;
1387 if (test_bit(0, active))
1388 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1392 static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
1393 struct btrfs_chunk_map *map,
1394 struct zone_info *zone_info,
1395 unsigned long *active)
1397 struct btrfs_fs_info *fs_info = bg->fs_info;
1399 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1400 btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1404 if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
1405 btrfs_err(bg->fs_info,
1406 "zoned: cannot recover write pointer for zone %llu",
1407 zone_info[0].physical);
1410 if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
1411 btrfs_err(bg->fs_info,
1412 "zoned: cannot recover write pointer for zone %llu",
1413 zone_info[1].physical);
1416 if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
1417 btrfs_err(bg->fs_info,
1418 "zoned: write pointer offset mismatch of zones in DUP profile");
1422 if (test_bit(0, active) != test_bit(1, active)) {
1423 if (!btrfs_zone_activate(bg))
1425 } else if (test_bit(0, active)) {
1426 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1429 bg->alloc_offset = zone_info[0].alloc_offset;
1430 bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
1434 static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
1435 struct btrfs_chunk_map *map,
1436 struct zone_info *zone_info,
1437 unsigned long *active)
1439 struct btrfs_fs_info *fs_info = bg->fs_info;
1442 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1443 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1444 btrfs_bg_type_to_raid_name(map->type));
1448 for (i = 0; i < map->num_stripes; i++) {
1449 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1450 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1453 if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
1454 !btrfs_test_opt(fs_info, DEGRADED)) {
1456 "zoned: write pointer offset mismatch of zones in %s profile",
1457 btrfs_bg_type_to_raid_name(map->type));
1460 if (test_bit(0, active) != test_bit(i, active)) {
1461 if (!btrfs_test_opt(fs_info, DEGRADED) &&
1462 !btrfs_zone_activate(bg)) {
1466 if (test_bit(0, active))
1467 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1469 /* In case a device is missing we have a cap of 0, so don't use it. */
1470 bg->zone_capacity = min_not_zero(zone_info[0].capacity,
1471 zone_info[1].capacity);
1474 if (zone_info[0].alloc_offset != WP_MISSING_DEV)
1475 bg->alloc_offset = zone_info[0].alloc_offset;
1477 bg->alloc_offset = zone_info[i - 1].alloc_offset;
1482 static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
1483 struct btrfs_chunk_map *map,
1484 struct zone_info *zone_info,
1485 unsigned long *active)
1487 struct btrfs_fs_info *fs_info = bg->fs_info;
1489 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1490 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1491 btrfs_bg_type_to_raid_name(map->type));
1495 for (int i = 0; i < map->num_stripes; i++) {
1496 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1497 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1500 if (test_bit(0, active) != test_bit(i, active)) {
1501 if (!btrfs_zone_activate(bg))
1504 if (test_bit(0, active))
1505 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1507 bg->zone_capacity += zone_info[i].capacity;
1508 bg->alloc_offset += zone_info[i].alloc_offset;
1514 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1515 struct btrfs_chunk_map *map,
1516 struct zone_info *zone_info,
1517 unsigned long *active)
1519 struct btrfs_fs_info *fs_info = bg->fs_info;
1521 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1522 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1523 btrfs_bg_type_to_raid_name(map->type));
1527 for (int i = 0; i < map->num_stripes; i++) {
1528 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1529 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1532 if (test_bit(0, active) != test_bit(i, active)) {
1533 if (!btrfs_zone_activate(bg))
1536 if (test_bit(0, active))
1537 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1540 if ((i % map->sub_stripes) == 0) {
1541 bg->zone_capacity += zone_info[i].capacity;
1542 bg->alloc_offset += zone_info[i].alloc_offset;
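/*
 * Derive the zoned allocation state of a block group (allocation offset,
 * zone capacity, active flag) from the zones backing its stripes, according
 * to the block group's RAID profile.
 */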
1549 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1551 struct btrfs_fs_info *fs_info = cache->fs_info;
1552 struct btrfs_chunk_map *map;
1553 u64 logical = cache->start;
1554 u64 length = cache->length;
1555 struct zone_info *zone_info = NULL;
1558 unsigned long *active = NULL;
1560 u32 num_sequential = 0, num_conventional = 0;
1562 if (!btrfs_is_zoned(fs_info))
1566 if (!IS_ALIGNED(length, fs_info->zone_size)) {
1568 "zoned: block group %llu len %llu unaligned to zone size %llu",
1569 logical, length, fs_info->zone_size);
1573 map = btrfs_find_chunk_map(fs_info, logical, length);
1577 cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
1578 if (!cache->physical_map) {
1583 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1589 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1595 for (i = 0; i < map->num_stripes; i++) {
1596 ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
1600 if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1606 if (num_sequential > 0)
1607 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1609 if (num_conventional > 0) {
1610 /* Zone capacity is always zone size in emulation */
1611 cache->zone_capacity = cache->length;
1612 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1615 "zoned: failed to determine allocation offset of bg %llu",
1618 } else if (map->num_stripes == num_conventional) {
1619 cache->alloc_offset = last_alloc;
1620 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1625 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
1626 case 0: /* single */
1627 ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
1629 case BTRFS_BLOCK_GROUP_DUP:
1630 ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
1632 case BTRFS_BLOCK_GROUP_RAID1:
1633 case BTRFS_BLOCK_GROUP_RAID1C3:
1634 case BTRFS_BLOCK_GROUP_RAID1C4:
1635 ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
1637 case BTRFS_BLOCK_GROUP_RAID0:
1638 ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
1640 case BTRFS_BLOCK_GROUP_RAID10:
1641 ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
1643 case BTRFS_BLOCK_GROUP_RAID5:
1644 case BTRFS_BLOCK_GROUP_RAID6:
1646 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1647 btrfs_bg_type_to_raid_name(map->type));
1653 /* Reject non SINGLE data profiles without RST */
1654 if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
1655 (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1656 !fs_info->stripe_root) {
1657 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1658 btrfs_bg_type_to_raid_name(map->type));
1662 if (cache->alloc_offset > cache->zone_capacity) {
1664 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1665 cache->alloc_offset, cache->zone_capacity,
1670 /* An extent is allocated after the write pointer */
1671 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1673 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1674 logical, last_alloc, cache->alloc_offset);
1679 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1680 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1681 btrfs_get_block_group(cache);
1682 spin_lock(&fs_info->zone_active_bgs_lock);
1683 list_add_tail(&cache->active_bg_list,
1684 &fs_info->zone_active_bgs);
1685 spin_unlock(&fs_info->zone_active_bgs_lock);
1688 btrfs_free_chunk_map(cache->physical_map);
1689 cache->physical_map = NULL;
1691 bitmap_free(active);
1693 btrfs_free_chunk_map(map);
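/*
 * On a zoned block group, the bytes between the used amount and the write
 * pointer, and everything beyond the zone capacity, cannot be reused until
 * the zone is reset. Account that as zone_unusable and expose only the area
 * between the write pointer and the zone capacity as free space.
 */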
1698 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1702 if (!btrfs_is_zoned(cache->fs_info))
1705 WARN_ON(cache->bytes_super != 0);
1706 unusable = (cache->alloc_offset - cache->used) +
1707 (cache->length - cache->zone_capacity);
1708 free = cache->zone_capacity - cache->alloc_offset;
1710 /* We only need ->free_space in ALLOC_SEQ block groups */
1711 cache->cached = BTRFS_CACHE_FINISHED;
1712 cache->free_space_ctl->free_space = free;
1713 cache->zone_unusable = unusable;
1716 bool btrfs_use_zone_append(struct btrfs_bio *bbio)
1718 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
1719 struct btrfs_inode *inode = bbio->inode;
1720 struct btrfs_fs_info *fs_info = bbio->fs_info;
1721 struct btrfs_block_group *cache;
1724 if (!btrfs_is_zoned(fs_info))
1727 if (!inode || !is_data_inode(&inode->vfs_inode))
1730 if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
1734 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
1735 * extent layout the relocation code has.
1736 * Furthermore we have set aside our own block group from which only the
1737 * relocation "process" can allocate and make sure only one process at a
1738 * time can add pages to an extent that gets relocated, so it's safe to
1739 * use regular REQ_OP_WRITE for this special case.
1741 if (btrfs_is_data_reloc_root(inode->root))
1744 cache = btrfs_lookup_block_group(fs_info, start);
1749 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1750 btrfs_put_block_group(cache);
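/*
 * With zone append the device picks the final write location, so adjust the
 * logical address recorded for the checksums by the difference between the
 * actually written physical address and the originally assigned one.
 */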
1755 void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
1757 const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
1758 struct btrfs_ordered_sum *sum = bbio->sums;
1760 if (physical < bbio->orig_physical)
1761 sum->logical -= bbio->orig_physical - physical;
1763 sum->logical += physical - bbio->orig_physical;
1766 static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
1769 struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
1770 struct extent_map *em;
1772 ordered->disk_bytenr = logical;
1774 write_lock(&em_tree->lock);
1775 em = search_extent_mapping(em_tree, ordered->file_offset,
1776 ordered->num_bytes);
1777 em->block_start = logical;
1778 free_extent_map(em);
1779 write_unlock(&em_tree->lock);
1782 static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
1783 u64 logical, u64 len)
1785 struct btrfs_ordered_extent *new;
1787 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
1788 split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
1789 ordered->num_bytes, len, logical))
1792 new = btrfs_split_ordered_extent(ordered, len);
1795 new->disk_bytenr = logical;
1796 btrfs_finish_one_ordered(new);
1800 void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
1802 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1803 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1804 struct btrfs_ordered_sum *sum;
1808 * A write to a pre-allocated region is for data relocation, and so
1809 * it should use the WRITE operation. No split/rewrite is necessary.
1811 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
1814 ASSERT(!list_empty(&ordered->list));
1815 /* The list cannot be empty here; the pre-alloc case has already returned above. */
1816 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
1817 logical = sum->logical;
1820 while (len < ordered->disk_num_bytes) {
1821 sum = list_next_entry(sum, list);
1822 if (sum->logical == logical + len) {
1826 if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
1827 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
1828 btrfs_err(fs_info, "failed to split ordered extent");
1831 logical = sum->logical;
1835 if (ordered->disk_bytenr != logical)
1836 btrfs_rewrite_logical_zoned(ordered, logical);
1840 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
1841 * were allocated by btrfs_alloc_dummy_sum only to record the logical
1842 * addresses and don't contain actual checksums. We thus must free them
1843 * here so that we don't attempt to log the csums later.
1845 if ((inode->flags & BTRFS_INODE_NODATASUM) ||
1846 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
1847 while ((sum = list_first_entry_or_null(&ordered->list,
1848 typeof(*sum), list))) {
1849 list_del(&sum->list);
1855 static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
1856 struct btrfs_block_group **active_bg)
1858 const struct writeback_control *wbc = ctx->wbc;
1859 struct btrfs_block_group *block_group = ctx->zoned_bg;
1860 struct btrfs_fs_info *fs_info = block_group->fs_info;
1862 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1865 if (fs_info->treelog_bg == block_group->start) {
1866 if (!btrfs_zone_activate(block_group)) {
1867 int ret_fin = btrfs_zone_finish_one_bg(fs_info);
1869 if (ret_fin != 1 || !btrfs_zone_activate(block_group))
1872 } else if (*active_bg != block_group) {
1873 struct btrfs_block_group *tgt = *active_bg;
1875 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
1876 lockdep_assert_held(&fs_info->zoned_meta_io_lock);
1880 * If there is unsent IO left in the allocated area,
1881 * we cannot wait for it as that may cause a deadlock.
1883 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1884 if (wbc->sync_mode == WB_SYNC_NONE ||
1885 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
1889 /* Pivot active metadata/system block group. */
1890 btrfs_zoned_meta_io_unlock(fs_info);
1891 wait_eb_writebacks(tgt);
1892 do_zone_finish(tgt, true);
1893 btrfs_zoned_meta_io_lock(fs_info);
1894 if (*active_bg == tgt) {
1895 btrfs_put_block_group(tgt);
1899 if (!btrfs_zone_activate(block_group))
1901 if (*active_bg != block_group) {
1902 ASSERT(*active_bg == NULL);
1903 *active_bg = block_group;
1904 btrfs_get_block_group(block_group);
1912 * Check if @ctx->eb is aligned to the write pointer.
1915 * 0: @ctx->eb is at the write pointer. You can write it.
1916 * -EAGAIN: There is a hole. The caller should handle the case.
1917 * -EBUSY: There is a hole, but the caller can just bail out.
1919 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1920 struct btrfs_eb_write_context *ctx)
1922 const struct writeback_control *wbc = ctx->wbc;
1923 const struct extent_buffer *eb = ctx->eb;
1924 struct btrfs_block_group *block_group = ctx->zoned_bg;
1926 if (!btrfs_is_zoned(fs_info))
1930 if (block_group->start > eb->start ||
1931 block_group->start + block_group->length <= eb->start) {
1932 btrfs_put_block_group(block_group);
1934 ctx->zoned_bg = NULL;
1939 block_group = btrfs_lookup_block_group(fs_info, eb->start);
1942 ctx->zoned_bg = block_group;
1945 if (block_group->meta_write_pointer == eb->start) {
1946 struct btrfs_block_group **tgt;
1948 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
1951 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
1952 tgt = &fs_info->active_system_bg;
1954 tgt = &fs_info->active_meta_bg;
1955 if (check_bg_is_active(ctx, tgt))
1960 * Since we may release fs_info->zoned_meta_io_lock, someone can already
1961 * start writing this eb. In that case, we can just bail out.
1963 if (block_group->meta_write_pointer > eb->start)
1966 /* If for_sync, this hole will be filled by a transaction commit. */
1967 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
1972 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1974 if (!btrfs_dev_is_sequential(device, physical))
1977 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
1978 length >> SECTOR_SHIFT, GFP_NOFS, 0);
1981 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
1982 struct blk_zone *zone)
1984 struct btrfs_io_context *bioc = NULL;
1985 u64 mapped_length = PAGE_SIZE;
1986 unsigned int nofs_flag;
1990 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
1991 &mapped_length, &bioc, NULL, NULL);
1992 if (ret || !bioc || mapped_length < PAGE_SIZE) {
1997 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2002 nofs_flag = memalloc_nofs_save();
2003 nmirrors = (int)bioc->num_stripes;
2004 for (i = 0; i < nmirrors; i++) {
2005 u64 physical = bioc->stripes[i].physical;
2006 struct btrfs_device *dev = bioc->stripes[i].dev;
2008 /* Missing device */
2012 ret = btrfs_get_dev_zone(dev, physical, zone);
2013 /* Failing device */
2014 if (ret == -EIO || ret == -EOPNOTSUPP)
2018 memalloc_nofs_restore(nofs_flag);
2020 btrfs_put_bioc(bioc);
2025 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
2026 * filling zeros from @physical_pos up to the write pointer of the dev-replace source device.
2029 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2030 u64 physical_start, u64 physical_pos)
2032 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2033 struct blk_zone zone;
2038 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2041 ret = read_zone_info(fs_info, logical, &zone);
2045 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2047 if (physical_pos == wp)
2050 if (physical_pos > wp)
2053 length = wp - physical_pos;
2054 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2058 * Activate block group and underlying device zones
2060 * @block_group: the block group to activate
2062 * Return: true on success, false otherwise
2064 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2066 struct btrfs_fs_info *fs_info = block_group->fs_info;
2067 struct btrfs_chunk_map *map;
2068 struct btrfs_device *device;
2070 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2074 if (!btrfs_is_zoned(block_group->fs_info))
2077 map = block_group->physical_map;
2079 spin_lock(&fs_info->zone_active_bgs_lock);
2080 spin_lock(&block_group->lock);
2081 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2087 if (btrfs_zoned_bg_is_full(block_group)) {
2092 for (i = 0; i < map->num_stripes; i++) {
2093 struct btrfs_zoned_device_info *zinfo;
2096 device = map->stripes[i].dev;
2097 physical = map->stripes[i].physical;
2098 zinfo = device->zone_info;
2100 if (zinfo->max_active_zones == 0)
2104 reserved = zinfo->reserved_active_zones;
2106 * For the data block group, leave active zones for one
2107 * metadata block group and one system block group.
2109 if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2114 if (!btrfs_dev_set_active_zone(device, physical)) {
2115 /* Cannot activate the zone */
2120 zinfo->reserved_active_zones--;
2123 /* Successfully activated all the zones */
2124 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2125 spin_unlock(&block_group->lock);
2127 /* For the active block group list */
2128 btrfs_get_block_group(block_group);
2129 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2130 spin_unlock(&fs_info->zone_active_bgs_lock);
2135 spin_unlock(&block_group->lock);
2136 spin_unlock(&fs_info->zone_active_bgs_lock);
2140 static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2142 struct btrfs_fs_info *fs_info = block_group->fs_info;
2143 const u64 end = block_group->start + block_group->length;
2144 struct radix_tree_iter iter;
2145 struct extent_buffer *eb;
2149 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
2150 block_group->start >> fs_info->sectorsize_bits) {
2151 eb = radix_tree_deref_slot(slot);
2154 if (radix_tree_deref_retry(eb)) {
2155 slot = radix_tree_iter_retry(&iter);
2159 if (eb->start < block_group->start)
2161 if (eb->start >= end)
2164 slot = radix_tree_iter_resume(slot, &iter);
2166 wait_on_extent_buffer_writeback(eb);
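/*
 * Deactivate a block group: unless the caller knows it is fully written,
 * wait for outstanding reservations, ordered extents and extent buffer
 * writeback, then mark the remaining space unusable, send ZONE_FINISH to
 * the backing device zones and release the active zone accounting.
 */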
2172 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2174 struct btrfs_fs_info *fs_info = block_group->fs_info;
2175 struct btrfs_chunk_map *map;
2176 const bool is_metadata = (block_group->flags &
2177 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2181 spin_lock(&block_group->lock);
2182 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2183 spin_unlock(&block_group->lock);
2187 /* Check if we have unwritten allocated space */
2189 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2190 spin_unlock(&block_group->lock);
2195 * If we are sure that the block group is full (= no more room left for
2196 * new allocation) and the IO for the last usable block is completed, we
2197 * don't need to wait for the other IOs. This holds because we ensure
2198 * the sequential IO submissions using the ZONE_APPEND command for data
2199 * and block_group->meta_write_pointer for metadata.
2201 if (!fully_written) {
2202 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2203 spin_unlock(&block_group->lock);
2206 spin_unlock(&block_group->lock);
2208 ret = btrfs_inc_block_group_ro(block_group, false);
2212 /* Ensure all writes in this block group finish */
2213 btrfs_wait_block_group_reservations(block_group);
2214 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
2215 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
2216 block_group->length);
2217 /* Wait for extent buffers to be written. */
2219 wait_eb_writebacks(block_group);
2221 spin_lock(&block_group->lock);
2224 * Bail out if someone already deactivated the block group, or
2225 * allocated space is left in the block group.
2227 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2228 &block_group->runtime_flags)) {
2229 spin_unlock(&block_group->lock);
2230 btrfs_dec_block_group_ro(block_group);
2234 if (block_group->reserved ||
2235 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2236 &block_group->runtime_flags)) {
2237 spin_unlock(&block_group->lock);
2238 btrfs_dec_block_group_ro(block_group);
2243 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2244 block_group->alloc_offset = block_group->zone_capacity;
2245 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2246 block_group->meta_write_pointer = block_group->start +
2247 block_group->zone_capacity;
2248 block_group->free_space_ctl->free_space = 0;
2249 btrfs_clear_treelog_bg(block_group);
2250 btrfs_clear_data_reloc_bg(block_group);
2251 spin_unlock(&block_group->lock);
2253 map = block_group->physical_map;
2254 for (i = 0; i < map->num_stripes; i++) {
2255 struct btrfs_device *device = map->stripes[i].dev;
2256 const u64 physical = map->stripes[i].physical;
2257 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2258 unsigned int nofs_flags;
2260 if (zinfo->max_active_zones == 0)
2263 nofs_flags = memalloc_nofs_save();
2264 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2265 physical >> SECTOR_SHIFT,
2266 zinfo->zone_size >> SECTOR_SHIFT);
2267 memalloc_nofs_restore(nofs_flags);
2272 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2273 zinfo->reserved_active_zones++;
2274 btrfs_dev_clear_active_zone(device, physical);
2278 btrfs_dec_block_group_ro(block_group);
2280 spin_lock(&fs_info->zone_active_bgs_lock);
2281 ASSERT(!list_empty(&block_group->active_bg_list));
2282 list_del_init(&block_group->active_bg_list);
2283 spin_unlock(&fs_info->zone_active_bgs_lock);
2285 /* For active_bg_list */
2286 btrfs_put_block_group(block_group);
2288 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2293 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2295 if (!btrfs_is_zoned(block_group->fs_info))
2298 return do_zone_finish(block_group, false);
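/*
 * Check whether a new block group of the given profile could still be
 * activated, i.e. whether some device has enough active zones left (one for
 * SINGLE, two for DUP). For data block groups the zones reserved for
 * metadata and system block groups are not counted as available.
 */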
2301 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2303 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2304 struct btrfs_device *device;
2307 if (!btrfs_is_zoned(fs_info))
2310 /* Check if there is a device with active zones left */
2311 mutex_lock(&fs_info->chunk_mutex);
2312 spin_lock(&fs_info->zone_active_bgs_lock);
2313 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2314 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2320 if (!zinfo->max_active_zones) {
2325 if (flags & BTRFS_BLOCK_GROUP_DATA)
2326 reserved = zinfo->reserved_active_zones;
2328 switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2329 case 0: /* single */
2330 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2332 case BTRFS_BLOCK_GROUP_DUP:
2333 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2339 spin_unlock(&fs_info->zone_active_bgs_lock);
2340 mutex_unlock(&fs_info->chunk_mutex);
2343 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	u64 min_alloc_bytes;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	/* No MIXED_BG on zoned btrfs. */
	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		min_alloc_bytes = fs_info->sectorsize;
	else
		min_alloc_bytes = fs_info->nodesize;

	/* Bail out if we can allocate more data from this block group. */
	if (logical + length + min_alloc_bytes <=
	    block_group->start + block_group->zone_capacity)
		goto out;

	do_zone_finish(block_group, true);

out:
	btrfs_put_block_group(block_group);
}

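/*
 * Deferred work to finish the zone of a block group once its last extent
 * buffer has been written back.
 */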
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{
	struct btrfs_block_group *bg =
		container_of(work, struct btrfs_block_group, zone_finish_work);

	wait_on_extent_buffer_writeback(bg->last_eb);
	free_extent_buffer(bg->last_eb);
	btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
	btrfs_put_block_group(bg);
}

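/*
 * Schedule deferred zone finishing for @bg when the extent buffer being
 * written is close enough to the end of the zone that no further extent
 * buffer of the same size would fit.
 */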
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb)
{
	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
		return;

	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
			  bg->start);
		return;
	}

	/* For the work */
	btrfs_get_block_group(bg);
	atomic_inc(&eb->refs);
	bg->last_eb = eb;
	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
	queue_work(system_unbound_wq, &bg->zone_finish_work);
}

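/* Forget @bg as the dedicated data relocation block group if it is one. */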
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

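/* Free the per-device zone report caches. */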
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

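/*
 * Report whether block group reclaim should run: true once the used bytes of
 * all devices reach bg_reclaim_threshold percent of the total capacity. A
 * threshold of 0 disables reclaim.
 */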
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 used = 0;
	u64 total = 0;
	u64 factor;

	ASSERT(btrfs_is_zoned(fs_info));

	if (fs_info->bg_reclaim_threshold == 0)
		return false;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		total += device->disk_total_bytes;
		used += device->bytes_used;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	factor = div64_u64(used * 100, total);
	return factor >= fs_info->bg_reclaim_threshold;
}

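/*
 * Once the relocation extents up to @logical + @length are fully written,
 * clear BLOCK_GROUP_FLAG_ZONED_DATA_RELOC so the block group can be used for
 * regular allocation and zone finishing again.
 */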
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	/* It should be called on a previous data relocation block group. */
	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
		goto out;

	/* All relocation extents are written. */
	if (block_group->start + block_group->alloc_offset == logical + length) {
		/*
		 * Now, release this block group for further allocations and
		 * zone finish.
		 */
		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			  &block_group->runtime_flags);
	}

out:
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
}

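/*
 * Finish the zone of the active block group with the least remaining space,
 * skipping SYSTEM, reserved and data relocation block groups. Returns 1 if a
 * block group was finished, 0 if none was eligible, or a negative errno.
 */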
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_block_group *min_bg = NULL;
	u64 min_avail = U64_MAX;
	int ret;

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
			    active_bg_list) {
		u64 avail;

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->alloc_offset == 0 ||
		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			continue;
		}

		avail = block_group->zone_capacity - block_group->alloc_offset;
		if (min_avail > avail) {
			if (min_bg)
				btrfs_put_block_group(min_bg);
			min_bg = block_group;
			min_avail = avail;
			btrfs_get_block_group(min_bg);
		}
		spin_unlock(&block_group->lock);
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);

	if (!min_bg)
		return 0;

	ret = btrfs_zone_finish(min_bg);
	btrfs_put_block_group(min_bg);

	return ret < 0 ? ret : 1;
}

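/*
 * Activate one non-data block group in @space_info. With @do_finish set, keep
 * finishing nearly full zones to free up active zone resources until an
 * activation succeeds or no more progress can be made. Returns 1 on success,
 * 0 if nothing could be activated, or a negative errno.
 */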
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{
	struct btrfs_block_group *bg;
	int index;

	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	for (;;) {
		int ret;
		bool need_finish = false;

		down_read(&space_info->groups_sem);
		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
			list_for_each_entry(bg, &space_info->block_groups[index],
					    list) {
				if (!spin_trylock(&bg->lock))
					continue;
				if (btrfs_zoned_bg_is_full(bg) ||
				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					     &bg->runtime_flags)) {
					spin_unlock(&bg->lock);
					continue;
				}
				spin_unlock(&bg->lock);

				if (btrfs_zone_activate(bg)) {
					up_read(&space_info->groups_sem);
					return 1;
				}

				need_finish = true;
			}
		}
		up_read(&space_info->groups_sem);

		if (!do_finish || !need_finish)
			break;

		ret = btrfs_zone_finish_one_bg(fs_info);
		if (ret == 0)
			break;
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Reserve zones for one metadata block group, one tree-log block group, and one
 * system block group.
 */
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *block_group;
	struct btrfs_device *device;
	/* Reserve zones for normal SINGLE metadata and tree-log block group. */
	unsigned int metadata_reserve = 2;
	/* Reserve a zone for SINGLE system block group. */
	unsigned int system_reserve = 1;

	if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
		return;

	/*
	 * This function is called from the mount context. So, there is no
	 * parallel process touching the bits. No need for read_seqretry().
	 */
	if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		metadata_reserve = 4;
	if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		system_reserve = 2;

	/* Apply the reservation on all the devices. */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		device->zone_info->reserved_active_zones =
			metadata_reserve + system_reserve;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Release reservation for currently active block groups. */
	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
		struct btrfs_chunk_map *map = block_group->physical_map;

		if (!(block_group->flags &
		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
			continue;

		for (int i = 0; i < map->num_stripes; i++)
			map->stripes[i].dev->zone_info->reserved_active_zones--;
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);
}