1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/sched/mm.h>
7 #include <linux/atomic.h>
8 #include <linux/vmalloc.h>
12 #include "rcu-string.h"
14 #include "block-group.h"
15 #include "transaction.h"
16 #include "dev-replace.h"
17 #include "space-info.h"
19 /* Maximum number of zones to report per blkdev_report_zones() call */
20 #define BTRFS_REPORT_NR_ZONES 4096
21 /* Invalid allocation pointer value for missing devices */
22 #define WP_MISSING_DEV ((u64)-1)
23 /* Pseudo write pointer value for conventional zone */
24 #define WP_CONVENTIONAL ((u64)-2)
27 * Location of the first zone of superblock logging zone pairs.
29 * - primary superblock: 0B (zone 0)
30 * - first copy: 512G (zone starting at that offset)
31 * - second copy: 4T (zone starting at that offset)
33 #define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
34 #define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
35 #define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
37 #define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
38 #define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
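/*
 * For reference, with the offsets above BTRFS_SB_LOG_FIRST_SHIFT works out to
 * const_ilog2(512GiB) = 39 and BTRFS_SB_LOG_SECOND_SHIFT to
 * const_ilog2(4TiB) = 42; they are only used to translate these fixed byte
 * offsets into zone numbers in sb_zone_number() below.
 */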
40 /* Number of superblock log zones */
41 #define BTRFS_NR_SB_LOG_ZONES 2
44 * Minimum number of active zones we need:
46 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
47 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
48 * - 1 zone for tree-log dedicated block group
49 * - 1 zone for relocation
51 #define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
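/*
 * Worked example: with BTRFS_SUPER_MIRROR_MAX == 3 this evaluates to
 * 3 + 3 + 1 + 1 = 8 zones (mirrors + SYSTEM/META/DATA + tree-log +
 * relocation), so a device that enforces an active zone limit below 8 is
 * rejected in btrfs_get_dev_zone_info().
 */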
54 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
55 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
56 * We do not expect the zone size to become larger than 8GiB or smaller than
57 * 4MiB in the near future.
59 #define BTRFS_MAX_ZONE_SIZE SZ_8G
60 #define BTRFS_MIN_ZONE_SIZE SZ_4M
62 #define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
64 static inline bool sb_zone_is_full(const struct blk_zone *zone)
66 return (zone->cond == BLK_ZONE_COND_FULL) ||
67 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
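/*
 * Illustration with assumed numbers: BTRFS_SUPER_INFO_SIZE is 4KiB, so
 * SUPER_INFO_SECTORS is 8. A zone with capacity 65536 sectors whose write
 * pointer sits at start + 65530 is treated as full here: the remaining 6
 * sectors cannot hold another superblock copy even though the zone condition
 * is not yet BLK_ZONE_COND_FULL.
 */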
70 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
72 struct blk_zone *zones = data;
74 memcpy(&zones[idx], zone, sizeof(*zone));
79 static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
82 bool empty[BTRFS_NR_SB_LOG_ZONES];
83 bool full[BTRFS_NR_SB_LOG_ZONES];
87 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
88 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
89 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
90 full[i] = sb_zone_is_full(&zones[i]);
94 * Possible states of log buffer zones
96 *           Empty[0]  In use[0]    Full[0]
97 * Empty[1]         *          0          1
98 * In use[1]        x          x          1
99 * Full[1]          0          0          C
102 * *: Special case, no superblock is written
103 * 0: Use write pointer of zones[0]
104 * 1: Use write pointer of zones[1]
105 * C: Compare super blocks from zones[0] and zones[1], use the latest
106 * one determined by generation
107 * x: Invalid state
110 if (empty[0] && empty[1]) {
111 /* Special case to distinguish no superblock to read */
112 *wp_ret = zones[0].start << SECTOR_SHIFT;
114 } else if (full[0] && full[1]) {
115 /* Compare two super blocks */
116 struct address_space *mapping = bdev->bd_inode->i_mapping;
117 struct page *page[BTRFS_NR_SB_LOG_ZONES];
118 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
121 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
124 bytenr = ((zones[i].start + zones[i].len)
125 << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
127 page[i] = read_cache_page_gfp(mapping,
128 bytenr >> PAGE_SHIFT, GFP_NOFS);
129 if (IS_ERR(page[i])) {
131 btrfs_release_disk_super(super[0]);
132 return PTR_ERR(page[i]);
134 super[i] = page_address(page[i]);
137 if (super[0]->generation > super[1]->generation)
138 sector = zones[1].start;
140 sector = zones[0].start;
142 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
143 btrfs_release_disk_super(super[i]);
144 } else if (!full[0] && (empty[1] || full[1])) {
145 sector = zones[0].wp;
146 } else if (full[0]) {
147 sector = zones[1].wp;
151 *wp_ret = sector << SECTOR_SHIFT;
156 * Get the first zone number of the superblock mirror
158 static inline u32 sb_zone_number(int shift, int mirror)
162 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
164 case 0: zone = 0; break;
165 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
166 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
169 ASSERT(zone <= U32_MAX);
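/*
 * Worked example: on a device with 256MiB zones the shift passed in is 28,
 * so mirror 0 maps to zone 0, mirror 1 to zone 1 << (39 - 28) = 2048
 * (2048 * 256MiB = 512GiB) and mirror 2 to zone 1 << (42 - 28) = 16384
 * (16384 * 256MiB = 4TiB), matching the byte offsets defined above.
 */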
174 static inline sector_t zone_start_sector(u32 zone_number,
175 struct block_device *bdev)
177 return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
180 static inline u64 zone_start_physical(u32 zone_number,
181 struct btrfs_zoned_device_info *zone_info)
183 return (u64)zone_number << zone_info->zone_size_shift;
187 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
188 * device into static-sized chunks and fakes a conventional zone on each of them.
191 static int emulate_report_zones(struct btrfs_device *device, u64 pos,
192 struct blk_zone *zones, unsigned int nr_zones)
194 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
195 sector_t bdev_size = bdev_nr_sectors(device->bdev);
198 pos >>= SECTOR_SHIFT;
199 for (i = 0; i < nr_zones; i++) {
200 zones[i].start = i * zone_sectors + pos;
201 zones[i].len = zone_sectors;
202 zones[i].capacity = zone_sectors;
203 zones[i].wp = zones[i].start + zone_sectors;
204 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
205 zones[i].cond = BLK_ZONE_COND_NOT_WP;
207 if (zones[i].wp >= bdev_size) {
216 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
217 struct blk_zone *zones, unsigned int *nr_zones)
219 struct btrfs_zoned_device_info *zinfo = device->zone_info;
226 if (!bdev_is_zoned(device->bdev)) {
227 ret = emulate_report_zones(device, pos, zones, *nr_zones);
233 if (zinfo->zone_cache) {
236 ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
237 zno = pos >> zinfo->zone_size_shift;
239 * We cannot report zones beyond the zone end. So, it is OK to
240 * cap *nr_zones at the device end.
242 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
244 for (i = 0; i < *nr_zones; i++) {
245 struct blk_zone *zone_info;
247 zone_info = &zinfo->zone_cache[zno + i];
252 if (i == *nr_zones) {
253 /* Cache hit on all the zones */
254 memcpy(zones, zinfo->zone_cache + zno,
255 sizeof(*zinfo->zone_cache) * *nr_zones);
260 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
261 copy_zone_info_cb, zones);
263 btrfs_err_in_rcu(device->fs_info,
264 "zoned: failed to read zone %llu on %s (devid %llu)",
265 pos, rcu_str_deref(device->name),
274 if (zinfo->zone_cache)
275 memcpy(zinfo->zone_cache + zno, zones,
276 sizeof(*zinfo->zone_cache) * *nr_zones);
281 /* The emulated zone size is determined from the size of the first device extent */
282 static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
284 struct btrfs_path *path;
285 struct btrfs_root *root = fs_info->dev_root;
286 struct btrfs_key key;
287 struct extent_buffer *leaf;
288 struct btrfs_dev_extent *dext;
292 key.type = BTRFS_DEV_EXTENT_KEY;
295 path = btrfs_alloc_path();
299 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
303 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
304 ret = btrfs_next_leaf(root, path);
307 /* No dev extents at all? Not good */
314 leaf = path->nodes[0];
315 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
316 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
320 btrfs_free_path(path);
325 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
327 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
328 struct btrfs_device *device;
331 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
332 if (!btrfs_fs_incompat(fs_info, ZONED))
335 mutex_lock(&fs_devices->device_list_mutex);
336 list_for_each_entry(device, &fs_devices->devices, dev_list) {
337 /* We can skip reading of zone info for missing devices */
341 ret = btrfs_get_dev_zone_info(device, true);
345 mutex_unlock(&fs_devices->device_list_mutex);
350 int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
352 struct btrfs_fs_info *fs_info = device->fs_info;
353 struct btrfs_zoned_device_info *zone_info = NULL;
354 struct block_device *bdev = device->bdev;
355 unsigned int max_active_zones;
356 unsigned int nactive;
359 struct blk_zone *zones = NULL;
360 unsigned int i, nreported = 0, nr_zones;
361 sector_t zone_sectors;
362 char *model, *emulated;
366 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not be set yet.
369 if (!btrfs_fs_incompat(fs_info, ZONED))
372 if (device->zone_info)
375 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
379 device->zone_info = zone_info;
381 if (!bdev_is_zoned(bdev)) {
382 if (!fs_info->zone_size) {
383 ret = calculate_emulated_zone_size(fs_info);
388 ASSERT(fs_info->zone_size);
389 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
391 zone_sectors = bdev_zone_sectors(bdev);
394 /* Check if it's a power of 2 (see is_power_of_2) */
395 ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
396 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
398 /* We reject devices with a zone size larger than 8GiB */
399 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
400 btrfs_err_in_rcu(fs_info,
401 "zoned: %s: zone size %llu larger than supported maximum %llu",
402 rcu_str_deref(device->name),
403 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
406 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
407 btrfs_err_in_rcu(fs_info,
408 "zoned: %s: zone size %llu smaller than supported minimum %u",
409 rcu_str_deref(device->name),
410 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
415 nr_sectors = bdev_nr_sectors(bdev);
416 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
417 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
419 * We limit max_zone_append_size also by max_segments *
420 * PAGE_SIZE. Technically, we can have multiple pages per segment. But,
421 * since btrfs adds the pages one by one to a bio, and btrfs cannot
422 * increase the metadata reservation even if it increases the number of
423 * extents, it is safe to stick with the limit.
425 zone_info->max_zone_append_size =
426 min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
427 (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
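/*
 * Hypothetical example: a drive reporting max_zone_append_sectors = 1024
 * (512KiB) and max_segments = 64 on a 4KiB page system ends up with
 * max_zone_append_size = min(512KiB, 64 * 4KiB) = 256KiB.
 */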
428 if (!IS_ALIGNED(nr_sectors, zone_sectors))
429 zone_info->nr_zones++;
431 max_active_zones = bdev_max_active_zones(bdev);
432 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
433 btrfs_err_in_rcu(fs_info,
434 "zoned: %s: max active zones %u is too small, need at least %u active zones",
435 rcu_str_deref(device->name), max_active_zones,
436 BTRFS_MIN_ACTIVE_ZONES);
440 zone_info->max_active_zones = max_active_zones;
442 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
443 if (!zone_info->seq_zones) {
448 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
449 if (!zone_info->empty_zones) {
454 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
455 if (!zone_info->active_zones) {
460 zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
467 * Enable zone cache only for a zoned device. On a non-zoned device, we
468 * fill the zone info with emulated CONVENTIONAL zones, so no need to use the cache.
471 if (populate_cache && bdev_is_zoned(device->bdev)) {
472 zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
473 zone_info->nr_zones);
474 if (!zone_info->zone_cache) {
475 btrfs_err_in_rcu(device->fs_info,
476 "zoned: failed to allocate zone cache for %s",
477 rcu_str_deref(device->name));
485 while (sector < nr_sectors) {
486 nr_zones = BTRFS_REPORT_NR_ZONES;
487 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
492 for (i = 0; i < nr_zones; i++) {
493 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
494 __set_bit(nreported, zone_info->seq_zones);
495 switch (zones[i].cond) {
496 case BLK_ZONE_COND_EMPTY:
497 __set_bit(nreported, zone_info->empty_zones);
499 case BLK_ZONE_COND_IMP_OPEN:
500 case BLK_ZONE_COND_EXP_OPEN:
501 case BLK_ZONE_COND_CLOSED:
502 __set_bit(nreported, zone_info->active_zones);
508 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
511 if (nreported != zone_info->nr_zones) {
512 btrfs_err_in_rcu(device->fs_info,
513 "inconsistent number of zones on %s (%u/%u)",
514 rcu_str_deref(device->name), nreported,
515 zone_info->nr_zones);
520 if (max_active_zones) {
521 if (nactive > max_active_zones) {
522 btrfs_err_in_rcu(device->fs_info,
523 "zoned: %u active zones on %s exceeds max_active_zones %u",
524 nactive, rcu_str_deref(device->name),
529 atomic_set(&zone_info->active_zones_left,
530 max_active_zones - nactive);
533 /* Validate superblock log */
534 nr_zones = BTRFS_NR_SB_LOG_ZONES;
535 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
538 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
540 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
541 if (sb_zone + 1 >= zone_info->nr_zones)
544 ret = btrfs_get_dev_zones(device,
545 zone_start_physical(sb_zone, zone_info),
546 &zone_info->sb_zones[sb_pos],
551 if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
552 btrfs_err_in_rcu(device->fs_info,
553 "zoned: failed to read super block log zone info at devid %llu zone %u",
554 device->devid, sb_zone);
560 * If zones[0] is conventional, always use the beginning of the
561 * zone to record the superblock. No need to validate in that case.
563 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
564 BLK_ZONE_TYPE_CONVENTIONAL)
567 ret = sb_write_pointer(device->bdev,
568 &zone_info->sb_zones[sb_pos], &sb_wp);
569 if (ret != -ENOENT && ret) {
570 btrfs_err_in_rcu(device->fs_info,
571 "zoned: super block log zone corrupted devid %llu zone %u",
572 device->devid, sb_zone);
581 switch (bdev_zoned_model(bdev)) {
583 model = "host-managed zoned";
587 model = "host-aware zoned";
592 emulated = "emulated ";
596 btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
597 bdev_zoned_model(bdev),
598 rcu_str_deref(device->name));
600 goto out_free_zone_info;
603 btrfs_info_in_rcu(fs_info,
604 "%s block device %s, %u %szones of %llu bytes",
605 model, rcu_str_deref(device->name), zone_info->nr_zones,
606 emulated, zone_info->zone_size);
613 btrfs_destroy_dev_zone_info(device);
618 void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
620 struct btrfs_zoned_device_info *zone_info = device->zone_info;
625 bitmap_free(zone_info->active_zones);
626 bitmap_free(zone_info->seq_zones);
627 bitmap_free(zone_info->empty_zones);
628 vfree(zone_info->zone_cache);
630 device->zone_info = NULL;
633 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
634 struct blk_zone *zone)
636 unsigned int nr_zones = 1;
639 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
640 if (ret != 0 || !nr_zones)
641 return ret ? ret : -EIO;
646 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
648 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
649 struct btrfs_device *device;
650 u64 zoned_devices = 0;
653 u64 max_zone_append_size = 0;
654 const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
657 /* Count zoned devices */
658 list_for_each_entry(device, &fs_devices->devices, dev_list) {
659 enum blk_zoned_model model;
664 model = bdev_zoned_model(device->bdev);
666 * A Host-Managed zoned device must be used as a zoned device.
667 * A Host-Aware zoned device and a non-zoned devices can be
668 * treated as a zoned device, if ZONED flag is enabled in the
671 if (model == BLK_ZONED_HM ||
672 (model == BLK_ZONED_HA && incompat_zoned) ||
673 (model == BLK_ZONED_NONE && incompat_zoned)) {
674 struct btrfs_zoned_device_info *zone_info;
676 zone_info = device->zone_info;
679 zone_size = zone_info->zone_size;
680 } else if (zone_info->zone_size != zone_size) {
682 "zoned: unequal block device zone sizes: have %llu found %llu",
683 device->zone_info->zone_size,
688 if (!max_zone_append_size ||
689 (zone_info->max_zone_append_size &&
690 zone_info->max_zone_append_size < max_zone_append_size))
691 max_zone_append_size =
692 zone_info->max_zone_append_size;
697 if (!zoned_devices && !incompat_zoned)
700 if (!zoned_devices && incompat_zoned) {
701 /* No zoned block device found on ZONED filesystem */
703 "zoned: no zoned devices found on a zoned filesystem");
708 if (zoned_devices && !incompat_zoned) {
710 "zoned: mode not enabled but zoned device found");
715 if (zoned_devices != nr_devices) {
717 "zoned: cannot mix zoned and regular devices");
723 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
724 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
725 * check the alignment here.
727 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
729 "zoned: zone size %llu not aligned to stripe %u",
730 zone_size, BTRFS_STRIPE_LEN);
735 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
736 btrfs_err(fs_info, "zoned: mixed block groups not supported");
741 fs_info->zone_size = zone_size;
742 fs_info->max_zone_append_size = ALIGN_DOWN(max_zone_append_size,
743 fs_info->sectorsize);
744 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
745 if (fs_info->max_zone_append_size < fs_info->max_extent_size)
746 fs_info->max_extent_size = fs_info->max_zone_append_size;
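/*
 * Example with assumed values: if the aligned max_zone_append_size is 256KiB
 * and max_extent_size still holds its 128MiB default, data extents are capped
 * at 256KiB here so each extent can be written with a single zone append bio.
 */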
749 * Check mount options here, because we might change fs_info->zoned
750 * from fs_info->zone_size.
752 ret = btrfs_check_mountopts_zoned(fs_info);
756 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
761 int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
763 if (!btrfs_is_zoned(info))
767 * Space cache writing is not COWed. Disable that to avoid write errors
768 * in sequential zones.
770 if (btrfs_test_opt(info, SPACE_CACHE)) {
771 btrfs_err(info, "zoned: space cache v1 is not supported");
775 if (btrfs_test_opt(info, NODATACOW)) {
776 btrfs_err(info, "zoned: NODATACOW not supported");
783 static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
784 int rw, u64 *bytenr_ret)
789 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
790 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
794 ret = sb_write_pointer(bdev, zones, &wp);
795 if (ret != -ENOENT && ret < 0)
799 struct blk_zone *reset = NULL;
801 if (wp == zones[0].start << SECTOR_SHIFT)
803 else if (wp == zones[1].start << SECTOR_SHIFT)
806 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
807 ASSERT(sb_zone_is_full(reset));
809 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
810 reset->start, reset->len,
815 reset->cond = BLK_ZONE_COND_EMPTY;
816 reset->wp = reset->start;
818 } else if (ret != -ENOENT) {
820 * For READ, we want the previous one. Move write pointer to
821 * the end of a zone, if it is at the head of a zone.
825 if (wp == zones[0].start << SECTOR_SHIFT)
826 zone_end = zones[1].start + zones[1].capacity;
827 else if (wp == zones[1].start << SECTOR_SHIFT)
828 zone_end = zones[0].start + zones[0].capacity;
830 wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
831 BTRFS_SUPER_INFO_SIZE);
833 wp -= BTRFS_SUPER_INFO_SIZE;
841 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
844 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
845 sector_t zone_sectors;
848 u8 zone_sectors_shift;
852 if (!bdev_is_zoned(bdev)) {
853 *bytenr_ret = btrfs_sb_offset(mirror);
857 ASSERT(rw == READ || rw == WRITE);
859 zone_sectors = bdev_zone_sectors(bdev);
860 if (!is_power_of_2(zone_sectors))
862 zone_sectors_shift = ilog2(zone_sectors);
863 nr_sectors = bdev_nr_sectors(bdev);
864 nr_zones = nr_sectors >> zone_sectors_shift;
866 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
867 if (sb_zone + 1 >= nr_zones)
870 ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
871 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
875 if (ret != BTRFS_NR_SB_LOG_ZONES)
878 return sb_log_location(bdev, zones, rw, bytenr_ret);
881 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
884 struct btrfs_zoned_device_info *zinfo = device->zone_info;
888 * For a zoned filesystem on a non-zoned block device, use the same
889 * super block locations as regular filesystem. Doing so, the super
890 * block can always be retrieved and the zoned flag of the volume
891 * detected from the super block information.
893 if (!bdev_is_zoned(device->bdev)) {
894 *bytenr_ret = btrfs_sb_offset(mirror);
898 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
899 if (zone_num + 1 >= zinfo->nr_zones)
902 return sb_log_location(device->bdev,
903 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
907 static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
915 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
916 if (zone_num + 1 >= zinfo->nr_zones)
919 if (!test_bit(zone_num, zinfo->seq_zones))
925 int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
927 struct btrfs_zoned_device_info *zinfo = device->zone_info;
928 struct blk_zone *zone;
931 if (!is_sb_log_zone(zinfo, mirror))
934 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
935 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
936 /* Advance to the next zone */
937 if (zone->cond == BLK_ZONE_COND_FULL) {
942 if (zone->cond == BLK_ZONE_COND_EMPTY)
943 zone->cond = BLK_ZONE_COND_IMP_OPEN;
945 zone->wp += SUPER_INFO_SECTORS;
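/*
 * Note: with a 4KiB superblock, SUPER_INFO_SECTORS is 8, so each superblock
 * write advances the cached write pointer by 8 sectors (4KiB) in the log zone.
 */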
947 if (sb_zone_is_full(zone)) {
949 * No room left to write a new superblock. Since the
950 * superblock is written with REQ_SYNC, it is safe to
951 * finish the zone now.
953 * If the write pointer is exactly at the capacity,
954 * explicit ZONE_FINISH is not necessary.
956 if (zone->wp != zone->start + zone->capacity) {
959 ret = blkdev_zone_mgmt(device->bdev,
960 REQ_OP_ZONE_FINISH, zone->start,
961 zone->len, GFP_NOFS);
966 zone->wp = zone->start + zone->len;
967 zone->cond = BLK_ZONE_COND_FULL;
972 /* All the zones are FULL. Should not reach here. */
977 int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
979 sector_t zone_sectors;
981 u8 zone_sectors_shift;
985 zone_sectors = bdev_zone_sectors(bdev);
986 zone_sectors_shift = ilog2(zone_sectors);
987 nr_sectors = bdev_nr_sectors(bdev);
988 nr_zones = nr_sectors >> zone_sectors_shift;
990 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
991 if (sb_zone + 1 >= nr_zones)
994 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
995 zone_start_sector(sb_zone, bdev),
996 zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
1000 * btrfs_find_allocatable_zones - find allocatable zones within a given region
1002 * @device: the device to allocate a region on
1003 * @hole_start: the position of the hole to allocate the region
1004 * @num_bytes: size of wanted region
1005 * @hole_end: the end of the hole
1006 * @return: position of allocatable zones
1008 * Allocatable region should not contain any superblock locations.
1010 u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1011 u64 hole_end, u64 num_bytes)
1013 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1014 const u8 shift = zinfo->zone_size_shift;
1015 u64 nzones = num_bytes >> shift;
1016 u64 pos = hole_start;
1021 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
1022 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
1024 while (pos < hole_end) {
1025 begin = pos >> shift;
1026 end = begin + nzones;
1028 if (end > zinfo->nr_zones)
1031 /* Check if zones in the region are all empty */
1032 if (btrfs_dev_is_sequential(device, pos) &&
1033 find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
1034 pos += zinfo->zone_size;
1039 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1043 sb_zone = sb_zone_number(shift, i);
1044 if (!(end <= sb_zone ||
1045 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1047 pos = zone_start_physical(
1048 sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1052 /* We also need to exclude regular superblock positions */
1053 sb_pos = btrfs_sb_offset(i);
1054 if (!(pos + num_bytes <= sb_pos ||
1055 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1057 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1069 static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1071 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1072 unsigned int zno = (pos >> zone_info->zone_size_shift);
1074 /* We can use any number of zones */
1075 if (zone_info->max_active_zones == 0)
1078 if (!test_bit(zno, zone_info->active_zones)) {
1079 /* Active zone left? */
1080 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1082 if (test_and_set_bit(zno, zone_info->active_zones)) {
1083 /* Someone already set the bit */
1084 atomic_inc(&zone_info->active_zones_left);
1091 static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1093 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1094 unsigned int zno = (pos >> zone_info->zone_size_shift);
1096 /* We can use any number of zones */
1097 if (zone_info->max_active_zones == 0)
1100 if (test_and_clear_bit(zno, zone_info->active_zones))
1101 atomic_inc(&zone_info->active_zones_left);
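/*
 * Sketch of the accounting above: apart from transient races,
 * active_zones_left plus the number of bits set in the active_zones bitmap
 * stays equal to max_active_zones, so activations are only granted while the
 * device still has spare active zone resources.
 */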
1104 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1105 u64 length, u64 *bytes)
1110 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1111 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
1118 btrfs_dev_set_zone_empty(device, physical);
1119 btrfs_dev_clear_active_zone(device, physical);
1120 physical += device->zone_info->zone_size;
1121 length -= device->zone_info->zone_size;
1127 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1129 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1130 const u8 shift = zinfo->zone_size_shift;
1131 unsigned long begin = start >> shift;
1132 unsigned long end = (start + size) >> shift;
1136 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1137 ASSERT(IS_ALIGNED(size, zinfo->zone_size));
1139 if (end > zinfo->nr_zones)
1142 /* All the zones are conventional */
1143 if (find_next_bit(zinfo->seq_zones, end, begin) == end)
1146 /* All the zones are sequential and empty */
1147 if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
1148 find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
1151 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1154 if (!btrfs_dev_is_sequential(device, pos) ||
1155 btrfs_dev_is_empty_zone(device, pos))
1158 /* Free regions should be empty */
1161 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
1162 rcu_str_deref(device->name), device->devid, pos >> shift);
1165 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1175 * Calculate an allocation pointer from the extent allocation information
1176 * for a block group consisting of conventional zones. It points to the
1177 * end of the highest-addressed extent in the block group, used as the allocation offset.
1180 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1183 struct btrfs_fs_info *fs_info = cache->fs_info;
1184 struct btrfs_root *root;
1185 struct btrfs_path *path;
1186 struct btrfs_key key;
1187 struct btrfs_key found_key;
1191 path = btrfs_alloc_path();
1195 key.objectid = cache->start + cache->length;
1199 root = btrfs_extent_root(fs_info, key.objectid);
1200 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1201 /* We should not find the exact match */
1207 ret = btrfs_previous_extent_item(root, path, cache->start);
1216 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1218 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1219 length = found_key.offset;
1221 length = fs_info->nodesize;
1223 if (!(found_key.objectid >= cache->start &&
1224 found_key.objectid + length <= cache->start + cache->length)) {
1228 *offset_ret = found_key.objectid + length - cache->start;
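/*
 * Illustration with made-up numbers: for a block group starting at 1GiB whose
 * highest extent item has objectid 1GiB + 200MiB and length 4MiB, the
 * returned offset is 204MiB, i.e. allocation in this conventional-zone block
 * group resumes 204MiB into the group.
 */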
1232 btrfs_free_path(path);
1236 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1238 struct btrfs_fs_info *fs_info = cache->fs_info;
1239 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1240 struct extent_map *em;
1241 struct map_lookup *map;
1242 struct btrfs_device *device;
1243 u64 logical = cache->start;
1244 u64 length = cache->length;
1247 unsigned int nofs_flag;
1248 u64 *alloc_offsets = NULL;
1250 u64 *physical = NULL;
1251 unsigned long *active = NULL;
1253 u32 num_sequential = 0, num_conventional = 0;
1255 if (!btrfs_is_zoned(fs_info))
1259 if (!IS_ALIGNED(length, fs_info->zone_size)) {
1261 "zoned: block group %llu len %llu unaligned to zone size %llu",
1262 logical, length, fs_info->zone_size);
1266 /* Get the chunk mapping */
1267 read_lock(&em_tree->lock);
1268 em = lookup_extent_mapping(em_tree, logical, length);
1269 read_unlock(&em_tree->lock);
1274 map = em->map_lookup;
1276 cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
1277 if (!cache->physical_map) {
1282 alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
1283 if (!alloc_offsets) {
1288 caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
1294 physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
1300 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1306 for (i = 0; i < map->num_stripes; i++) {
1308 struct blk_zone zone;
1309 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1310 int dev_replace_is_ongoing = 0;
1312 device = map->stripes[i].dev;
1313 physical[i] = map->stripes[i].physical;
1315 if (device->bdev == NULL) {
1316 alloc_offsets[i] = WP_MISSING_DEV;
1320 is_sequential = btrfs_dev_is_sequential(device, physical[i]);
1326 if (!is_sequential) {
1327 alloc_offsets[i] = WP_CONVENTIONAL;
1332 * This zone will be used for allocation, so mark this zone non-empty.
1335 btrfs_dev_clear_zone_empty(device, physical[i]);
1337 down_read(&dev_replace->rwsem);
1338 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1339 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1340 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
1341 up_read(&dev_replace->rwsem);
1344 * The group is mapped to a sequential zone. Get the zone write
1345 * pointer to determine the allocation offset within the zone.
1347 WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
1348 nofs_flag = memalloc_nofs_save();
1349 ret = btrfs_get_dev_zone(device, physical[i], &zone);
1350 memalloc_nofs_restore(nofs_flag);
1351 if (ret == -EIO || ret == -EOPNOTSUPP) {
1353 alloc_offsets[i] = WP_MISSING_DEV;
1359 if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
1360 btrfs_err_in_rcu(fs_info,
1361 "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1362 zone.start << SECTOR_SHIFT,
1363 rcu_str_deref(device->name), device->devid);
1368 caps[i] = (zone.capacity << SECTOR_SHIFT);
1370 switch (zone.cond) {
1371 case BLK_ZONE_COND_OFFLINE:
1372 case BLK_ZONE_COND_READONLY:
1374 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1375 physical[i] >> device->zone_info->zone_size_shift,
1376 rcu_str_deref(device->name), device->devid);
1377 alloc_offsets[i] = WP_MISSING_DEV;
1379 case BLK_ZONE_COND_EMPTY:
1380 alloc_offsets[i] = 0;
1382 case BLK_ZONE_COND_FULL:
1383 alloc_offsets[i] = caps[i];
1386 /* Partially used zone */
1388 ((zone.wp - zone.start) << SECTOR_SHIFT);
1389 __set_bit(i, active);
1394 * Consider a zone as active if we can allow any number of active zones.
1397 if (!device->zone_info->max_active_zones)
1398 __set_bit(i, active);
1401 if (num_sequential > 0)
1402 cache->seq_zone = true;
1404 if (num_conventional > 0) {
1406 * Avoid calling calculate_alloc_pointer() for a new BG. It
1407 * is of no use there; the offset must always be 0.
1409 * Also, we have a lock chain of extent buffer lock ->
1410 * chunk mutex. For a new BG, this function is called from
1411 * btrfs_make_block_group() which is already taking the
1412 * chunk mutex. Thus, to avoid a deadlock, we cannot call
1413 * calculate_alloc_pointer(), which takes extent buffer
1414 * locks.
1417 /* Zone capacity is always zone size in emulation */
1418 cache->zone_capacity = cache->length;
1420 cache->alloc_offset = 0;
1423 ret = calculate_alloc_pointer(cache, &last_alloc);
1424 if (ret || map->num_stripes == num_conventional) {
1426 cache->alloc_offset = last_alloc;
1429 "zoned: failed to determine allocation offset of bg %llu",
1435 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
1436 case 0: /* single */
1437 if (alloc_offsets[0] == WP_MISSING_DEV) {
1439 "zoned: cannot recover write pointer for zone %llu",
1444 cache->alloc_offset = alloc_offsets[0];
1445 cache->zone_capacity = caps[0];
1446 cache->zone_is_active = test_bit(0, active);
1448 case BTRFS_BLOCK_GROUP_DUP:
1449 if (map->type & BTRFS_BLOCK_GROUP_DATA) {
1450 btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
1454 if (alloc_offsets[0] == WP_MISSING_DEV) {
1456 "zoned: cannot recover write pointer for zone %llu",
1461 if (alloc_offsets[1] == WP_MISSING_DEV) {
1463 "zoned: cannot recover write pointer for zone %llu",
1468 if (alloc_offsets[0] != alloc_offsets[1]) {
1470 "zoned: write pointer offset mismatch of zones in DUP profile");
1474 if (test_bit(0, active) != test_bit(1, active)) {
1475 if (!btrfs_zone_activate(cache)) {
1480 cache->zone_is_active = test_bit(0, active);
1482 cache->alloc_offset = alloc_offsets[0];
1483 cache->zone_capacity = min(caps[0], caps[1]);
1485 case BTRFS_BLOCK_GROUP_RAID1:
1486 case BTRFS_BLOCK_GROUP_RAID0:
1487 case BTRFS_BLOCK_GROUP_RAID10:
1488 case BTRFS_BLOCK_GROUP_RAID5:
1489 case BTRFS_BLOCK_GROUP_RAID6:
1490 /* non-single profiles are not supported yet */
1492 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1493 btrfs_bg_type_to_raid_name(map->type));
1498 if (cache->zone_is_active) {
1499 btrfs_get_block_group(cache);
1500 spin_lock(&fs_info->zone_active_bgs_lock);
1501 list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
1502 spin_unlock(&fs_info->zone_active_bgs_lock);
1506 if (cache->alloc_offset > fs_info->zone_size) {
1508 "zoned: invalid write pointer %llu in block group %llu",
1509 cache->alloc_offset, cache->start);
1513 if (cache->alloc_offset > cache->zone_capacity) {
1515 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1516 cache->alloc_offset, cache->zone_capacity,
1521 /* An extent is allocated after the write pointer */
1522 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1524 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1525 logical, last_alloc, cache->alloc_offset);
1530 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1533 kfree(cache->physical_map);
1534 cache->physical_map = NULL;
1536 bitmap_free(active);
1539 kfree(alloc_offsets);
1540 free_extent_map(em);
1545 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1549 if (!btrfs_is_zoned(cache->fs_info))
1552 WARN_ON(cache->bytes_super != 0);
1553 unusable = (cache->alloc_offset - cache->used) +
1554 (cache->length - cache->zone_capacity);
1555 free = cache->zone_capacity - cache->alloc_offset;
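/*
 * Example with assumed numbers: length = 256MiB, zone_capacity = 224MiB,
 * alloc_offset = 100MiB and used = 80MiB give
 * unusable = (100 - 80) + (256 - 224) = 52MiB and free = 224 - 100 = 124MiB.
 */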
1557 /* We only need ->free_space in ALLOC_SEQ block groups */
1558 cache->last_byte_to_unpin = (u64)-1;
1559 cache->cached = BTRFS_CACHE_FINISHED;
1560 cache->free_space_ctl->free_space = free;
1561 cache->zone_unusable = unusable;
1564 void btrfs_redirty_list_add(struct btrfs_transaction *trans,
1565 struct extent_buffer *eb)
1567 struct btrfs_fs_info *fs_info = eb->fs_info;
1569 if (!btrfs_is_zoned(fs_info) ||
1570 btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
1571 !list_empty(&eb->release_list))
1574 set_extent_buffer_dirty(eb);
1575 set_extent_bits_nowait(&trans->dirty_pages, eb->start,
1576 eb->start + eb->len - 1, EXTENT_DIRTY);
1577 memzero_extent_buffer(eb, 0, eb->len);
1578 set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
1580 spin_lock(&trans->releasing_ebs_lock);
1581 list_add_tail(&eb->release_list, &trans->releasing_ebs);
1582 spin_unlock(&trans->releasing_ebs_lock);
1583 atomic_inc(&eb->refs);
1586 void btrfs_free_redirty_list(struct btrfs_transaction *trans)
1588 spin_lock(&trans->releasing_ebs_lock);
1589 while (!list_empty(&trans->releasing_ebs)) {
1590 struct extent_buffer *eb;
1592 eb = list_first_entry(&trans->releasing_ebs,
1593 struct extent_buffer, release_list);
1594 list_del_init(&eb->release_list);
1595 free_extent_buffer(eb);
1597 spin_unlock(&trans->releasing_ebs_lock);
1600 bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
1602 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1603 struct btrfs_block_group *cache;
1606 if (!btrfs_is_zoned(fs_info))
1609 if (!is_data_inode(&inode->vfs_inode))
1613 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
1614 * extent layout the relocation code has.
1615 * Furthermore, we have set aside our own block group from which only the
1616 * relocation "process" can allocate, and we make sure only one process at a
1617 * time can add pages to an extent that gets relocated, so it's safe to
1618 * use a regular REQ_OP_WRITE for this special case.
1620 if (btrfs_is_data_reloc_root(inode->root))
1623 cache = btrfs_lookup_block_group(fs_info, start);
1628 ret = cache->seq_zone;
1629 btrfs_put_block_group(cache);
1634 void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
1637 struct btrfs_ordered_extent *ordered;
1638 const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
1640 if (bio_op(bio) != REQ_OP_ZONE_APPEND)
1643 ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
1644 if (WARN_ON(!ordered))
1647 ordered->physical = physical;
1648 ordered->bdev = bio->bi_bdev;
1650 btrfs_put_ordered_extent(ordered);
1653 void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
1655 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1656 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1657 struct extent_map_tree *em_tree;
1658 struct extent_map *em;
1659 struct btrfs_ordered_sum *sum;
1660 u64 orig_logical = ordered->disk_bytenr;
1661 u64 *logical = NULL;
1664 /* Zoned devices should not have partitions, so we can assume the partition offset is 0 */
1665 ASSERT(!bdev_is_partition(ordered->bdev));
1666 if (WARN_ON(!ordered->bdev))
1669 if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
1670 ordered->physical, &logical, &nr,
1676 if (orig_logical == *logical)
1679 ordered->disk_bytenr = *logical;
1681 em_tree = &inode->extent_tree;
1682 write_lock(&em_tree->lock);
1683 em = search_extent_mapping(em_tree, ordered->file_offset,
1684 ordered->num_bytes);
1685 em->block_start = *logical;
1686 free_extent_map(em);
1687 write_unlock(&em_tree->lock);
1689 list_for_each_entry(sum, &ordered->list, list) {
1690 if (*logical < orig_logical)
1691 sum->bytenr -= orig_logical - *logical;
1693 sum->bytenr += *logical - orig_logical;
1700 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1701 struct extent_buffer *eb,
1702 struct btrfs_block_group **cache_ret)
1704 struct btrfs_block_group *cache;
1707 if (!btrfs_is_zoned(fs_info))
1710 cache = btrfs_lookup_block_group(fs_info, eb->start);
1714 if (cache->meta_write_pointer != eb->start) {
1715 btrfs_put_block_group(cache);
1719 cache->meta_write_pointer = eb->start + eb->len;
1727 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
1728 struct extent_buffer *eb)
1730 if (!btrfs_is_zoned(eb->fs_info) || !cache)
1733 ASSERT(cache->meta_write_pointer == eb->start + eb->len);
1734 cache->meta_write_pointer = eb->start;
1737 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1739 if (!btrfs_dev_is_sequential(device, physical))
1742 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
1743 length >> SECTOR_SHIFT, GFP_NOFS, 0);
1746 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
1747 struct blk_zone *zone)
1749 struct btrfs_io_context *bioc = NULL;
1750 u64 mapped_length = PAGE_SIZE;
1751 unsigned int nofs_flag;
1755 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
1756 &mapped_length, &bioc);
1757 if (ret || !bioc || mapped_length < PAGE_SIZE) {
1762 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1767 nofs_flag = memalloc_nofs_save();
1768 nmirrors = (int)bioc->num_stripes;
1769 for (i = 0; i < nmirrors; i++) {
1770 u64 physical = bioc->stripes[i].physical;
1771 struct btrfs_device *dev = bioc->stripes[i].dev;
1773 /* Missing device */
1777 ret = btrfs_get_dev_zone(dev, physical, zone);
1778 /* Failing device */
1779 if (ret == -EIO || ret == -EOPNOTSUPP)
1783 memalloc_nofs_restore(nofs_flag);
1785 btrfs_put_bioc(bioc);
1790 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
1791 * filling zeros from @physical_pos up to the write pointer of the dev-replace source device.
1794 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
1795 u64 physical_start, u64 physical_pos)
1797 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
1798 struct blk_zone zone;
1803 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
1806 ret = read_zone_info(fs_info, logical, &zone);
1810 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
1812 if (physical_pos == wp)
1815 if (physical_pos > wp)
1818 length = wp - physical_pos;
1819 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
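/*
 * Illustration with hypothetical numbers: if the source zone's write pointer
 * sits 48MiB past the zone start but the copied data on the target ends at
 * physical_start + 40MiB, the 8MiB gap is zeroed out so the target zone's
 * write pointer catches up with the source.
 */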
1822 struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
1823 u64 logical, u64 length)
1825 struct btrfs_device *device;
1826 struct extent_map *em;
1827 struct map_lookup *map;
1829 em = btrfs_get_chunk_map(fs_info, logical, length);
1831 return ERR_CAST(em);
1833 map = em->map_lookup;
1834 /* We only support single profile for now */
1835 device = map->stripes[0].dev;
1837 free_extent_map(em);
1843 * Activate block group and underlying device zones
1845 * @block_group: the block group to activate
1847 * Return: true on success, false otherwise
1849 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
1851 struct btrfs_fs_info *fs_info = block_group->fs_info;
1852 struct btrfs_space_info *space_info = block_group->space_info;
1853 struct map_lookup *map;
1854 struct btrfs_device *device;
1859 if (!btrfs_is_zoned(block_group->fs_info))
1862 map = block_group->physical_map;
1864 spin_lock(&space_info->lock);
1865 spin_lock(&block_group->lock);
1866 if (block_group->zone_is_active) {
1872 if (btrfs_zoned_bg_is_full(block_group)) {
1877 for (i = 0; i < map->num_stripes; i++) {
1878 device = map->stripes[i].dev;
1879 physical = map->stripes[i].physical;
1881 if (device->zone_info->max_active_zones == 0)
1884 if (!btrfs_dev_set_active_zone(device, physical)) {
1885 /* Cannot activate the zone */
1891 /* Successfully activated all the zones */
1892 block_group->zone_is_active = 1;
1893 space_info->active_total_bytes += block_group->length;
1894 spin_unlock(&block_group->lock);
1895 btrfs_try_granting_tickets(fs_info, space_info);
1896 spin_unlock(&space_info->lock);
1898 /* For the active block group list */
1899 btrfs_get_block_group(block_group);
1901 spin_lock(&fs_info->zone_active_bgs_lock);
1902 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
1903 spin_unlock(&fs_info->zone_active_bgs_lock);
1908 spin_unlock(&block_group->lock);
1909 spin_unlock(&space_info->lock);
1913 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
1915 struct btrfs_fs_info *fs_info = block_group->fs_info;
1916 struct map_lookup *map;
1920 spin_lock(&block_group->lock);
1921 if (!block_group->zone_is_active) {
1922 spin_unlock(&block_group->lock);
1926 /* Check if we have unwritten allocated space */
1927 if ((block_group->flags &
1928 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
1929 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
1930 spin_unlock(&block_group->lock);
1935 * If we are sure that the block group is full (= no more room left for
1936 * new allocation) and the IO for the last usable block is completed, we
1937 * don't need to wait for the other IOs. This holds because we ensure
1938 * the sequential IO submissions using the ZONE_APPEND command for data
1939 * and block_group->meta_write_pointer for metadata.
1941 if (!fully_written) {
1942 spin_unlock(&block_group->lock);
1944 ret = btrfs_inc_block_group_ro(block_group, false);
1948 /* Ensure all writes in this block group finish */
1949 btrfs_wait_block_group_reservations(block_group);
1950 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
1951 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
1952 block_group->length);
1954 spin_lock(&block_group->lock);
1957 * Bail out if someone already deactivated the block group, or
1958 * allocated space is left in the block group.
1960 if (!block_group->zone_is_active) {
1961 spin_unlock(&block_group->lock);
1962 btrfs_dec_block_group_ro(block_group);
1966 if (block_group->reserved) {
1967 spin_unlock(&block_group->lock);
1968 btrfs_dec_block_group_ro(block_group);
1973 block_group->zone_is_active = 0;
1974 block_group->alloc_offset = block_group->zone_capacity;
1975 block_group->free_space_ctl->free_space = 0;
1976 btrfs_clear_treelog_bg(block_group);
1977 btrfs_clear_data_reloc_bg(block_group);
1978 spin_unlock(&block_group->lock);
1980 map = block_group->physical_map;
1981 for (i = 0; i < map->num_stripes; i++) {
1982 struct btrfs_device *device = map->stripes[i].dev;
1983 const u64 physical = map->stripes[i].physical;
1985 if (device->zone_info->max_active_zones == 0)
1988 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
1989 physical >> SECTOR_SHIFT,
1990 device->zone_info->zone_size >> SECTOR_SHIFT,
1996 btrfs_dev_clear_active_zone(device, physical);
2000 btrfs_dec_block_group_ro(block_group);
2002 spin_lock(&fs_info->zone_active_bgs_lock);
2003 ASSERT(!list_empty(&block_group->active_bg_list));
2004 list_del_init(&block_group->active_bg_list);
2005 spin_unlock(&fs_info->zone_active_bgs_lock);
2007 /* For active_bg_list */
2008 btrfs_put_block_group(block_group);
2010 clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2011 wake_up_all(&fs_info->zone_finish_wait);
2016 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2018 if (!btrfs_is_zoned(block_group->fs_info))
2021 return do_zone_finish(block_group, false);
2024 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2026 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2027 struct btrfs_device *device;
2030 if (!btrfs_is_zoned(fs_info))
2033 /* Check if there is a device with active zones left */
2034 mutex_lock(&fs_info->chunk_mutex);
2035 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2036 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2041 if (!zinfo->max_active_zones ||
2042 atomic_read(&zinfo->active_zones_left)) {
2047 mutex_unlock(&fs_info->chunk_mutex);
2050 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2055 void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2057 struct btrfs_block_group *block_group;
2058 u64 min_alloc_bytes;
2060 if (!btrfs_is_zoned(fs_info))
2063 block_group = btrfs_lookup_block_group(fs_info, logical);
2064 ASSERT(block_group);
2066 /* No MIXED_BG on zoned btrfs. */
2067 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2068 min_alloc_bytes = fs_info->sectorsize;
2070 min_alloc_bytes = fs_info->nodesize;
2072 /* Bail out if we can allocate more data from this block group. */
2073 if (logical + length + min_alloc_bytes <=
2074 block_group->start + block_group->zone_capacity)
2077 do_zone_finish(block_group, true);
2080 btrfs_put_block_group(block_group);
2083 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2085 struct btrfs_block_group *bg =
2086 container_of(work, struct btrfs_block_group, zone_finish_work);
2088 wait_on_extent_buffer_writeback(bg->last_eb);
2089 free_extent_buffer(bg->last_eb);
2090 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2091 btrfs_put_block_group(bg);
2094 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2095 struct extent_buffer *eb)
2097 if (!bg->seq_zone || eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2100 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2101 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2107 btrfs_get_block_group(bg);
2108 atomic_inc(&eb->refs);
2110 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2111 queue_work(system_unbound_wq, &bg->zone_finish_work);
2114 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2116 struct btrfs_fs_info *fs_info = bg->fs_info;
2118 spin_lock(&fs_info->relocation_bg_lock);
2119 if (fs_info->data_reloc_bg == bg->start)
2120 fs_info->data_reloc_bg = 0;
2121 spin_unlock(&fs_info->relocation_bg_lock);
2124 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2126 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2127 struct btrfs_device *device;
2129 if (!btrfs_is_zoned(fs_info))
2132 mutex_lock(&fs_devices->device_list_mutex);
2133 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2134 if (device->zone_info) {
2135 vfree(device->zone_info->zone_cache);
2136 device->zone_info->zone_cache = NULL;
2139 mutex_unlock(&fs_devices->device_list_mutex);
2142 bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
2144 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2145 struct btrfs_device *device;
2150 ASSERT(btrfs_is_zoned(fs_info));
2152 if (fs_info->bg_reclaim_threshold == 0)
2155 mutex_lock(&fs_devices->device_list_mutex);
2156 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2160 total += device->disk_total_bytes;
2161 used += device->bytes_used;
2163 mutex_unlock(&fs_devices->device_list_mutex);
2165 factor = div64_u64(used * 100, total);
2166 return factor >= fs_info->bg_reclaim_threshold;
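/*
 * Example with assumed numbers: 800GiB used out of 1TiB of zoned device space
 * gives factor = 78, so with the default bg_reclaim_threshold of 75 this
 * returns true and reclaim of zoned block groups is triggered.
 */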
2169 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2172 struct btrfs_block_group *block_group;
2174 if (!btrfs_is_zoned(fs_info))
2177 block_group = btrfs_lookup_block_group(fs_info, logical);
2178 /* It should be called on a previous data relocation block group. */
2179 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2181 spin_lock(&block_group->lock);
2182 if (!block_group->zoned_data_reloc_ongoing)
2185 /* All relocation extents are written. */
2186 if (block_group->start + block_group->alloc_offset == logical + length) {
2187 /* Now, release this block group for further allocations. */
2188 block_group->zoned_data_reloc_ongoing = 0;
2192 spin_unlock(&block_group->lock);
2193 btrfs_put_block_group(block_group);
2196 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2198 struct btrfs_block_group *block_group;
2199 struct btrfs_block_group *min_bg = NULL;
2200 u64 min_avail = U64_MAX;
2203 spin_lock(&fs_info->zone_active_bgs_lock);
2204 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2208 spin_lock(&block_group->lock);
2209 if (block_group->reserved ||
2210 (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
2211 spin_unlock(&block_group->lock);
2215 avail = block_group->zone_capacity - block_group->alloc_offset;
2216 if (min_avail > avail) {
2218 btrfs_put_block_group(min_bg);
2219 min_bg = block_group;
2221 btrfs_get_block_group(min_bg);
2223 spin_unlock(&block_group->lock);
2225 spin_unlock(&fs_info->zone_active_bgs_lock);
2230 ret = btrfs_zone_finish(min_bg);
2231 btrfs_put_block_group(min_bg);
2233 return ret < 0 ? ret : 1;
2236 int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
2237 struct btrfs_space_info *space_info,
2240 struct btrfs_block_group *bg;
2243 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2246 /* No more block groups to activate */
2247 if (space_info->active_total_bytes == space_info->total_bytes)
2252 bool need_finish = false;
2254 down_read(&space_info->groups_sem);
2255 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2256 list_for_each_entry(bg, &space_info->block_groups[index],
2258 if (!spin_trylock(&bg->lock))
2260 if (btrfs_zoned_bg_is_full(bg) || bg->zone_is_active) {
2261 spin_unlock(&bg->lock);
2264 spin_unlock(&bg->lock);
2266 if (btrfs_zone_activate(bg)) {
2267 up_read(&space_info->groups_sem);
2274 up_read(&space_info->groups_sem);
2276 if (!do_finish || !need_finish)
2279 ret = btrfs_zone_finish_one_bg(fs_info);