1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
5 * This file is released under the GPL.
10 #include <linux/module.h>
11 #include <linux/crc32.h>
12 #include <linux/sched/mm.h>
14 #define DM_MSG_PREFIX "zoned metadata"
19 #define DMZ_META_VER 2
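/*
 * Note: metadata version 2 adds the label and UUID fields of the super block
 * below and is required for multi-device (tertiary super block) setups.
 */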
22 * On-disk super block magic.
24 #define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \
25 (((unsigned int)('Z')) << 16) | \
26 (((unsigned int)('B')) << 8) | \
27 ((unsigned int)('D')))
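/* DMZ_MAGIC evaluates to 0x445A4244: 'D', 'Z', 'B', 'D' packed most-significant byte first */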
 30 * On-disk super block.
 31 * This uses only 512 B but occupies a full 4KB block on disk. This block is
32 * followed on disk by the mapping table of chunks to zones and the bitmap
33 * blocks indicating zone block validity.
34 * The overall resulting metadata format is:
35 * (1) Super block (1 block)
36 * (2) Chunk mapping table (nr_map_blocks)
37 * (3) Bitmap blocks (nr_bitmap_blocks)
38 * All metadata blocks are stored in conventional zones, starting from
39 * the first conventional zone found on disk.
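 *
 * Sizing example (for illustration only): with 4KB metadata blocks and 256MB
 * zones, each zone holds 65536 blocks, so its validity bitmap needs
 * 65536 bits = 8KB = 2 bitmap blocks, and each 4KB chunk mapping block holds
 * 512 entries.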
45 /* Metadata version number */
46 __le32 version; /* 8 */
48 /* Generation number */
51 /* This block number */
52 __le64 sb_block; /* 24 */
54 /* The number of metadata blocks, including this super block */
55 __le32 nr_meta_blocks; /* 28 */
57 /* The number of sequential zones reserved for reclaim */
58 __le32 nr_reserved_seq; /* 32 */
60 /* The number of entries in the mapping table */
61 __le32 nr_chunks; /* 36 */
63 /* The number of blocks used for the chunk mapping table */
64 __le32 nr_map_blocks; /* 40 */
66 /* The number of blocks used for the block bitmaps */
67 __le32 nr_bitmap_blocks; /* 44 */
73 u8 dmz_label[32]; /* 80 */
76 u8 dmz_uuid[16]; /* 96 */
79 u8 dev_uuid[16]; /* 112 */
81 /* Padding to full 512B sector */
82 u8 reserved[400]; /* 512 */
86 * Chunk mapping entry: entries are indexed by chunk number
87 * and give the zone ID (dzone_id) mapping the chunk on disk.
88 * This zone may be sequential or random. If it is a sequential
89 * zone, a second zone (bzone_id) used as a write buffer may
 90 * also be specified. This second zone will always be a randomly writable zone.
 99 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
101 #define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
102 #define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES))
103 #define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1)
104 #define DMZ_MAP_UNMAPPED UINT_MAX
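/*
 * With 4KB blocks and 8-byte entries, DMZ_MAP_ENTRIES is 512, so chunk c is
 * described by entry (c & DMZ_MAP_ENTRIES_MASK) of mapping block
 * (c >> DMZ_MAP_ENTRIES_SHIFT).
 */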
 107 * Metadata block descriptor (for cached metadata blocks).
111 struct list_head link;
120 * Metadata block state flags.
130 * Super block information (one per metadata set).
135 struct dmz_mblock *mblk;
136 struct dmz_super *sb;
137 struct dm_zone *zone;
141 * In-memory metadata.
143 struct dmz_metadata {
145 unsigned int nr_devs;
147 char devname[BDEVNAME_SIZE];
148 char label[BDEVNAME_SIZE];
151 sector_t zone_bitmap_size;
152 unsigned int zone_nr_bitmap_blocks;
153 unsigned int zone_bits_per_mblk;
155 sector_t zone_nr_blocks;
156 sector_t zone_nr_blocks_shift;
158 sector_t zone_nr_sectors;
159 sector_t zone_nr_sectors_shift;
161 unsigned int nr_bitmap_blocks;
162 unsigned int nr_map_blocks;
164 unsigned int nr_zones;
165 unsigned int nr_useable_zones;
166 unsigned int nr_meta_blocks;
167 unsigned int nr_meta_zones;
168 unsigned int nr_data_zones;
169 unsigned int nr_cache_zones;
170 unsigned int nr_rnd_zones;
171 unsigned int nr_reserved_seq;
172 unsigned int nr_chunks;
174 /* Zone information array */
178 unsigned int mblk_primary;
179 unsigned int sb_version;
181 unsigned int min_nr_mblks;
182 unsigned int max_nr_mblks;
184 struct rw_semaphore mblk_sem;
185 struct mutex mblk_flush_lock;
186 spinlock_t mblk_lock;
187 struct rb_root mblk_rbtree;
188 struct list_head mblk_lru_list;
189 struct list_head mblk_dirty_list;
190 struct shrinker mblk_shrinker;
192 /* Zone allocation management */
193 struct mutex map_lock;
194 struct dmz_mblock **map_mblk;
196 atomic_t unmap_nr_rnd;
197 struct list_head unmap_rnd_list;
198 struct list_head map_rnd_list;
200 unsigned int nr_cache;
201 atomic_t unmap_nr_cache;
202 struct list_head unmap_cache_list;
203 struct list_head map_cache_list;
206 atomic_t unmap_nr_seq;
207 struct list_head unmap_seq_list;
208 struct list_head map_seq_list;
210 atomic_t nr_reserved_seq_zones;
211 struct list_head reserved_seq_zones_list;
213 wait_queue_head_t free_wq;
216 #define dmz_zmd_info(zmd, format, args...) \
217 DMINFO("(%s): " format, (zmd)->label, ## args)
219 #define dmz_zmd_err(zmd, format, args...) \
220 DMERR("(%s): " format, (zmd)->label, ## args)
222 #define dmz_zmd_warn(zmd, format, args...) \
223 DMWARN("(%s): " format, (zmd)->label, ## args)
225 #define dmz_zmd_debug(zmd, format, args...) \
226 DMDEBUG("(%s): " format, (zmd)->label, ## args)
230 static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
232 unsigned int zone_id;
238 if (zmd->nr_devs > 1 &&
239 (zone_id >= zmd->dev[1].zone_offset))
240 zone_id -= zmd->dev[1].zone_offset;
244 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
246 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
248 return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
251 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
253 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
255 return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
258 struct dmz_dev *dmz_zone_to_dev(struct dmz_metadata *zmd, struct dm_zone *zone)
263 if (zmd->nr_devs > 1 &&
264 zone->id >= zmd->dev[1].zone_offset)
270 unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
272 return zmd->zone_nr_blocks;
275 unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
277 return zmd->zone_nr_blocks_shift;
280 unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
282 return zmd->zone_nr_sectors;
285 unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
287 return zmd->zone_nr_sectors_shift;
290 unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
292 return zmd->nr_zones;
295 unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
297 return zmd->nr_chunks;
300 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
305 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
307 return atomic_read(&zmd->unmap_nr_rnd);
310 unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
312 return zmd->nr_cache;
315 unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
317 return atomic_read(&zmd->unmap_nr_cache);
320 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd)
325 unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd)
327 return atomic_read(&zmd->unmap_nr_seq);
330 static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
332 return xa_load(&zmd->zones, zone_id);
335 static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
336 unsigned int zone_id)
338 struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
341 return ERR_PTR(-ENOMEM);
343 if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
345 return ERR_PTR(-EBUSY);
348 INIT_LIST_HEAD(&zone->link);
349 atomic_set(&zone->refcount, 0);
351 zone->chunk = DMZ_MAP_UNMAPPED;
356 const char *dmz_metadata_label(struct dmz_metadata *zmd)
358 return (const char *)zmd->label;
361 bool dmz_check_dev(struct dmz_metadata *zmd)
365 for (i = 0; i < zmd->nr_devs; i++) {
366 if (!dmz_check_bdev(&zmd->dev[i]))
372 bool dmz_dev_is_dying(struct dmz_metadata *zmd)
376 for (i = 0; i < zmd->nr_devs; i++) {
377 if (dmz_bdev_is_dying(&zmd->dev[i]))
384 * Lock/unlock mapping table.
385 * The map lock also protects all the zone lists.
387 void dmz_lock_map(struct dmz_metadata *zmd)
389 mutex_lock(&zmd->map_lock);
392 void dmz_unlock_map(struct dmz_metadata *zmd)
394 mutex_unlock(&zmd->map_lock);
398 * Lock/unlock metadata access. This is a "read" lock on a semaphore
399 * that prevents metadata flush from running while metadata are being
400 * modified. The actual metadata write mutual exclusion is achieved with
401 * the map lock and zone state management (active and reclaim state are
402 * mutually exclusive).
404 void dmz_lock_metadata(struct dmz_metadata *zmd)
406 down_read(&zmd->mblk_sem);
409 void dmz_unlock_metadata(struct dmz_metadata *zmd)
411 up_read(&zmd->mblk_sem);
415 * Lock/unlock flush: prevent concurrent executions
416 * of dmz_flush_metadata as well as metadata modification in reclaim
417 * while flush is being executed.
419 void dmz_lock_flush(struct dmz_metadata *zmd)
421 mutex_lock(&zmd->mblk_flush_lock);
424 void dmz_unlock_flush(struct dmz_metadata *zmd)
426 mutex_unlock(&zmd->mblk_flush_lock);
430 * Allocate a metadata block.
432 static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
435 struct dmz_mblock *mblk = NULL;
437 /* See if we can reuse cached blocks */
438 if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
439 spin_lock(&zmd->mblk_lock);
440 mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
441 struct dmz_mblock, link);
443 list_del_init(&mblk->link);
444 rb_erase(&mblk->node, &zmd->mblk_rbtree);
447 spin_unlock(&zmd->mblk_lock);
452 /* Allocate a new block */
453 mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
457 mblk->page = alloc_page(GFP_NOIO);
463 RB_CLEAR_NODE(&mblk->node);
464 INIT_LIST_HEAD(&mblk->link);
468 mblk->data = page_address(mblk->page);
470 atomic_inc(&zmd->nr_mblks);
476 * Free a metadata block.
478 static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
480 __free_pages(mblk->page, 0);
483 atomic_dec(&zmd->nr_mblks);
487 * Insert a metadata block in the rbtree.
489 static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
491 struct rb_root *root = &zmd->mblk_rbtree;
492 struct rb_node **new = &(root->rb_node), *parent = NULL;
493 struct dmz_mblock *b;
495 /* Figure out where to put the new node */
497 b = container_of(*new, struct dmz_mblock, node);
499 new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
502 /* Add new node and rebalance tree */
503 rb_link_node(&mblk->node, parent, new);
504 rb_insert_color(&mblk->node, root);
508 * Lookup a metadata block in the rbtree. If the block is found, increment
509 * its reference count.
511 static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
514 struct rb_root *root = &zmd->mblk_rbtree;
515 struct rb_node *node = root->rb_node;
516 struct dmz_mblock *mblk;
519 mblk = container_of(node, struct dmz_mblock, node);
520 if (mblk->no == mblk_no) {
522 * If this is the first reference to the block,
523 * remove it from the LRU list.
526 if (mblk->ref == 1 &&
527 !test_bit(DMZ_META_DIRTY, &mblk->state))
528 list_del_init(&mblk->link);
531 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
538 * Metadata block BIO end callback.
540 static void dmz_mblock_bio_end_io(struct bio *bio)
542 struct dmz_mblock *mblk = bio->bi_private;
546 set_bit(DMZ_META_ERROR, &mblk->state);
548 if (bio_op(bio) == REQ_OP_WRITE)
549 flag = DMZ_META_WRITING;
551 flag = DMZ_META_READING;
553 clear_bit_unlock(flag, &mblk->state);
554 smp_mb__after_atomic();
555 wake_up_bit(&mblk->state, flag);
561 * Read an uncached metadata block from disk and add it to the cache.
563 static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
566 struct dmz_mblock *mblk, *m;
567 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
568 struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
571 if (dmz_bdev_is_dying(dev))
572 return ERR_PTR(-EIO);
574 /* Get a new block and a BIO to read it */
575 mblk = dmz_alloc_mblock(zmd, mblk_no);
577 return ERR_PTR(-ENOMEM);
579 bio = bio_alloc(GFP_NOIO, 1);
581 dmz_free_mblock(zmd, mblk);
582 return ERR_PTR(-ENOMEM);
585 spin_lock(&zmd->mblk_lock);
 588 * Make sure that another context did not start reading the block already.
591 m = dmz_get_mblock_fast(zmd, mblk_no);
593 spin_unlock(&zmd->mblk_lock);
594 dmz_free_mblock(zmd, mblk);
600 set_bit(DMZ_META_READING, &mblk->state);
601 dmz_insert_mblock(zmd, mblk);
603 spin_unlock(&zmd->mblk_lock);
605 /* Submit read BIO */
606 bio->bi_iter.bi_sector = dmz_blk2sect(block);
607 bio_set_dev(bio, dev->bdev);
608 bio->bi_private = mblk;
609 bio->bi_end_io = dmz_mblock_bio_end_io;
610 bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
611 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
618 * Free metadata blocks.
620 static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
623 struct dmz_mblock *mblk;
624 unsigned long count = 0;
626 if (!zmd->max_nr_mblks)
629 while (!list_empty(&zmd->mblk_lru_list) &&
630 atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
632 mblk = list_first_entry(&zmd->mblk_lru_list,
633 struct dmz_mblock, link);
634 list_del_init(&mblk->link);
635 rb_erase(&mblk->node, &zmd->mblk_rbtree);
636 dmz_free_mblock(zmd, mblk);
644 * For mblock shrinker: get the number of unused metadata blocks in the cache.
646 static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
647 struct shrink_control *sc)
649 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
651 return atomic_read(&zmd->nr_mblks);
655 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
657 static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
658 struct shrink_control *sc)
660 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
663 spin_lock(&zmd->mblk_lock);
664 count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
665 spin_unlock(&zmd->mblk_lock);
667 return count ? count : SHRINK_STOP;
671 * Release a metadata block.
673 static void dmz_release_mblock(struct dmz_metadata *zmd,
674 struct dmz_mblock *mblk)
680 spin_lock(&zmd->mblk_lock);
683 if (mblk->ref == 0) {
684 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
685 rb_erase(&mblk->node, &zmd->mblk_rbtree);
686 dmz_free_mblock(zmd, mblk);
687 } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
688 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
689 dmz_shrink_mblock_cache(zmd, 1);
693 spin_unlock(&zmd->mblk_lock);
697 * Get a metadata block from the rbtree. If the block
698 * is not present, read it from disk.
700 static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
703 struct dmz_mblock *mblk;
704 struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
707 spin_lock(&zmd->mblk_lock);
708 mblk = dmz_get_mblock_fast(zmd, mblk_no);
709 spin_unlock(&zmd->mblk_lock);
712 /* Cache miss: read the block from disk */
713 mblk = dmz_get_mblock_slow(zmd, mblk_no);
718 /* Wait for on-going read I/O and check for error */
719 wait_on_bit_io(&mblk->state, DMZ_META_READING,
720 TASK_UNINTERRUPTIBLE);
721 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
722 dmz_release_mblock(zmd, mblk);
724 return ERR_PTR(-EIO);
731 * Mark a metadata block dirty.
733 static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
735 spin_lock(&zmd->mblk_lock);
736 if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
737 list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
738 spin_unlock(&zmd->mblk_lock);
742 * Issue a metadata block write BIO.
744 static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
747 struct dmz_dev *dev = zmd->sb[set].dev;
748 sector_t block = zmd->sb[set].block + mblk->no;
751 if (dmz_bdev_is_dying(dev))
754 bio = bio_alloc(GFP_NOIO, 1);
756 set_bit(DMZ_META_ERROR, &mblk->state);
760 set_bit(DMZ_META_WRITING, &mblk->state);
762 bio->bi_iter.bi_sector = dmz_blk2sect(block);
763 bio_set_dev(bio, dev->bdev);
764 bio->bi_private = mblk;
765 bio->bi_end_io = dmz_mblock_bio_end_io;
766 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
767 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
774 * Read/write a metadata block.
776 static int dmz_rdwr_block(struct dmz_dev *dev, int op,
777 sector_t block, struct page *page)
785 if (dmz_bdev_is_dying(dev))
788 bio = bio_alloc(GFP_NOIO, 1);
792 bio->bi_iter.bi_sector = dmz_blk2sect(block);
793 bio_set_dev(bio, dev->bdev);
794 bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
795 bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
796 ret = submit_bio_wait(bio);
805 * Write super block of the specified metadata set.
807 static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
809 struct dmz_mblock *mblk = zmd->sb[set].mblk;
810 struct dmz_super *sb = zmd->sb[set].sb;
811 struct dmz_dev *dev = zmd->sb[set].dev;
813 u64 sb_gen = zmd->sb_gen + 1;
816 sb->magic = cpu_to_le32(DMZ_MAGIC);
818 sb->version = cpu_to_le32(zmd->sb_version);
819 if (zmd->sb_version > 1) {
820 BUILD_BUG_ON(UUID_SIZE != 16);
821 export_uuid(sb->dmz_uuid, &zmd->uuid);
822 memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
823 export_uuid(sb->dev_uuid, &dev->uuid);
826 sb->gen = cpu_to_le64(sb_gen);
829 * The metadata always references the absolute block address,
 830 * i.e. relative to the entire block range, not the per-device block address.
833 sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
834 sb->sb_block = cpu_to_le64(sb_block);
835 sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
836 sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
837 sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
839 sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
840 sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
843 sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
845 ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
848 ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);
854 * Write dirty metadata blocks to the specified set.
856 static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
857 struct list_head *write_list,
860 struct dmz_mblock *mblk;
861 struct dmz_dev *dev = zmd->sb[set].dev;
862 struct blk_plug plug;
863 int ret = 0, nr_mblks_submitted = 0;
866 blk_start_plug(&plug);
867 list_for_each_entry(mblk, write_list, link) {
868 ret = dmz_write_mblock(zmd, mblk, set);
871 nr_mblks_submitted++;
873 blk_finish_plug(&plug);
875 /* Wait for completion */
876 list_for_each_entry(mblk, write_list, link) {
877 if (!nr_mblks_submitted)
879 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
880 TASK_UNINTERRUPTIBLE);
881 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
882 clear_bit(DMZ_META_ERROR, &mblk->state);
886 nr_mblks_submitted--;
889 /* Flush drive cache (this will also sync data) */
891 ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);
897 * Log dirty metadata blocks.
899 static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
900 struct list_head *write_list)
902 unsigned int log_set = zmd->mblk_primary ^ 0x1;
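	/* The two metadata sets are 0 and 1: XOR with 1 selects the other (log) set */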
905 /* Write dirty blocks to the log */
906 ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
911 * No error so far: now validate the log by updating the
912 * log index super block generation.
914 ret = dmz_write_sb(zmd, log_set);
922 * Flush dirty metadata blocks.
924 int dmz_flush_metadata(struct dmz_metadata *zmd)
926 struct dmz_mblock *mblk;
927 struct list_head write_list;
934 INIT_LIST_HEAD(&write_list);
937 * Make sure that metadata blocks are stable before logging: take
938 * the write lock on the metadata semaphore to prevent target BIOs
939 * from modifying metadata.
941 down_write(&zmd->mblk_sem);
942 dev = zmd->sb[zmd->mblk_primary].dev;
945 * This is called from the target flush work and reclaim work.
946 * Concurrent execution is not allowed.
950 if (dmz_bdev_is_dying(dev)) {
955 /* Get dirty blocks */
956 spin_lock(&zmd->mblk_lock);
957 list_splice_init(&zmd->mblk_dirty_list, &write_list);
958 spin_unlock(&zmd->mblk_lock);
960 /* If there are no dirty metadata blocks, just flush the device cache */
961 if (list_empty(&write_list)) {
962 ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);
967 * The primary metadata set is still clean. Keep it this way until
968 * all updates are successful in the secondary set. That is, use
969 * the secondary set as a log.
971 ret = dmz_log_dirty_mblocks(zmd, &write_list);
976 * The log is on disk. It is now safe to update in place
977 * in the primary metadata set.
979 ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
983 ret = dmz_write_sb(zmd, zmd->mblk_primary);
987 while (!list_empty(&write_list)) {
988 mblk = list_first_entry(&write_list, struct dmz_mblock, link);
989 list_del_init(&mblk->link);
991 spin_lock(&zmd->mblk_lock);
992 clear_bit(DMZ_META_DIRTY, &mblk->state);
994 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
995 spin_unlock(&zmd->mblk_lock);
1000 dmz_unlock_flush(zmd);
1001 up_write(&zmd->mblk_sem);
1006 if (!list_empty(&write_list)) {
1007 spin_lock(&zmd->mblk_lock);
1008 list_splice(&write_list, &zmd->mblk_dirty_list);
1009 spin_unlock(&zmd->mblk_lock);
1011 if (!dmz_check_bdev(dev))
1017 * Check super block.
1019 static int dmz_check_sb(struct dmz_metadata *zmd, unsigned int set)
1021 struct dmz_super *sb = zmd->sb[set].sb;
1022 struct dmz_dev *dev = zmd->sb[set].dev;
1023 unsigned int nr_meta_zones, nr_data_zones;
1024 u32 crc, stored_crc;
1027 if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
1028 dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
1029 DMZ_MAGIC, le32_to_cpu(sb->magic));
1033 zmd->sb_version = le32_to_cpu(sb->version);
1034 if (zmd->sb_version > DMZ_META_VER) {
1035 dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
1036 DMZ_META_VER, zmd->sb_version);
 1039 if ((zmd->sb_version < 2) && (set == 2)) {
1040 dmz_dev_err(dev, "Tertiary superblocks are not supported");
1044 gen = le64_to_cpu(sb->gen);
1045 stored_crc = le32_to_cpu(sb->crc);
1047 crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
1048 if (crc != stored_crc) {
1049 dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
1054 if (zmd->sb_version > 1) {
1057 import_uuid(&sb_uuid, sb->dmz_uuid);
1058 if (uuid_is_null(&sb_uuid)) {
1059 dmz_dev_err(dev, "NULL DM-Zoned uuid");
1061 } else if (uuid_is_null(&zmd->uuid)) {
1062 uuid_copy(&zmd->uuid, &sb_uuid);
1063 } else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
1064 dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
1065 "is %pUl expected %pUl",
1066 &sb_uuid, &zmd->uuid);
1069 if (!strlen(zmd->label))
1070 memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
1071 else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
1072 dmz_dev_err(dev, "mismatching DM-Zoned label, "
1073 "is %s expected %s",
1074 sb->dmz_label, zmd->label);
1077 import_uuid(&dev->uuid, sb->dev_uuid);
1078 if (uuid_is_null(&dev->uuid)) {
1079 dmz_dev_err(dev, "NULL device uuid");
1085 * Generation number should be 0, but it doesn't
1086 * really matter if it isn't.
1089 dmz_dev_warn(dev, "Invalid generation %llu",
1095 nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
1096 >> zmd->zone_nr_blocks_shift;
1097 if (!nr_meta_zones ||
1098 nr_meta_zones >= zmd->nr_rnd_zones) {
1099 dmz_dev_err(dev, "Invalid number of metadata blocks");
1103 if (!le32_to_cpu(sb->nr_reserved_seq) ||
1104 le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
1105 dmz_dev_err(dev, "Invalid number of reserved sequential zones");
1109 nr_data_zones = zmd->nr_useable_zones -
1110 (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
1111 if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
1112 dmz_dev_err(dev, "Invalid number of chunks %u / %u",
1113 le32_to_cpu(sb->nr_chunks), nr_data_zones);
1118 zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
1119 zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
1120 zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
1121 zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
1122 zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
1123 zmd->nr_meta_zones = nr_meta_zones;
1124 zmd->nr_data_zones = nr_data_zones;
1130 * Read the first or second super block from disk.
1132 static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
1134 dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
1135 set, zmd->sb[set].dev->name,
1136 zmd->sb[set].block);
1138 return dmz_rdwr_block(zmd->sb[set].dev, REQ_OP_READ,
1139 zmd->sb[set].block, zmd->sb[set].mblk->page);
1143 * Determine the position of the secondary super blocks on disk.
 1144 * This is used only if a corruption of the primary super block is detected.
1147 static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
1149 unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
1150 struct dmz_mblock *mblk;
1151 unsigned int zone_id = zmd->sb[0].zone->id;
1154 /* Allocate a block */
1155 mblk = dmz_alloc_mblock(zmd, 0);
1159 zmd->sb[1].mblk = mblk;
1160 zmd->sb[1].sb = mblk->data;
1162 /* Bad first super block: search for the second one */
1163 zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
1164 zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
1165 zmd->sb[1].dev = zmd->sb[0].dev;
1166 for (i = 1; i < zmd->nr_rnd_zones; i++) {
1167 if (dmz_read_sb(zmd, 1) != 0)
1169 if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
1171 zmd->sb[1].block += zone_nr_blocks;
1172 zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
1175 dmz_free_mblock(zmd, mblk);
1176 zmd->sb[1].mblk = NULL;
1177 zmd->sb[1].zone = NULL;
1178 zmd->sb[1].dev = NULL;
1184 * Read the first or second super block from disk.
1186 static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
1188 struct dmz_mblock *mblk;
1191 /* Allocate a block */
1192 mblk = dmz_alloc_mblock(zmd, 0);
1196 zmd->sb[set].mblk = mblk;
1197 zmd->sb[set].sb = mblk->data;
1199 /* Read super block */
1200 ret = dmz_read_sb(zmd, set);
1202 dmz_free_mblock(zmd, mblk);
1203 zmd->sb[set].mblk = NULL;
1211 * Recover a metadata set.
1213 static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
1215 unsigned int src_set = dst_set ^ 0x1;
1219 dmz_dev_warn(zmd->sb[dst_set].dev,
1220 "Metadata set %u invalid: recovering", dst_set);
1223 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1225 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1227 page = alloc_page(GFP_NOIO);
1231 /* Copy metadata blocks */
1232 for (i = 1; i < zmd->nr_meta_blocks; i++) {
1233 ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
1234 zmd->sb[src_set].block + i, page);
1237 ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
1238 zmd->sb[dst_set].block + i, page);
1243 /* Finalize with the super block */
1244 if (!zmd->sb[dst_set].mblk) {
1245 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1246 if (!zmd->sb[dst_set].mblk) {
1250 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1253 ret = dmz_write_sb(zmd, dst_set);
1255 __free_pages(page, 0);
1261 * Get super block from disk.
1263 static int dmz_load_sb(struct dmz_metadata *zmd)
1265 bool sb_good[2] = {false, false};
1266 u64 sb_gen[2] = {0, 0};
1269 if (!zmd->sb[0].zone) {
1270 dmz_zmd_err(zmd, "Primary super block zone not set");
1274 /* Read and check the primary super block */
1275 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1276 zmd->sb[0].dev = dmz_zone_to_dev(zmd, zmd->sb[0].zone);
1277 ret = dmz_get_sb(zmd, 0);
1279 dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
1283 ret = dmz_check_sb(zmd, 0);
1285 /* Read and check secondary super block */
1288 if (!zmd->sb[1].zone) {
1289 unsigned int zone_id =
1290 zmd->sb[0].zone->id + zmd->nr_meta_zones;
1292 zmd->sb[1].zone = dmz_get(zmd, zone_id);
1294 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1295 zmd->sb[1].dev = zmd->sb[0].dev;
1296 ret = dmz_get_sb(zmd, 1);
1298 ret = dmz_lookup_secondary_sb(zmd);
1301 dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
1305 ret = dmz_check_sb(zmd, 1);
1309 /* Use highest generation sb first */
1310 if (!sb_good[0] && !sb_good[1]) {
1311 dmz_zmd_err(zmd, "No valid super block found");
1316 sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
1318 ret = dmz_recover_mblocks(zmd, 0);
1320 dmz_dev_err(zmd->sb[0].dev,
1321 "Recovery of superblock 0 failed");
1327 sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
1329 ret = dmz_recover_mblocks(zmd, 1);
1332 dmz_dev_err(zmd->sb[1].dev,
1333 "Recovery of superblock 1 failed");
1338 if (sb_gen[0] >= sb_gen[1]) {
1339 zmd->sb_gen = sb_gen[0];
1340 zmd->mblk_primary = 0;
1342 zmd->sb_gen = sb_gen[1];
1343 zmd->mblk_primary = 1;
1346 dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
1347 "Using super block %u (gen %llu)",
1348 zmd->mblk_primary, zmd->sb_gen);
1350 if ((zmd->sb_version > 1) && zmd->sb[2].zone) {
1351 zmd->sb[2].block = dmz_start_block(zmd, zmd->sb[2].zone);
1352 zmd->sb[2].dev = dmz_zone_to_dev(zmd, zmd->sb[2].zone);
1353 ret = dmz_get_sb(zmd, 2);
1355 dmz_dev_err(zmd->sb[2].dev,
1356 "Read tertiary super block failed");
1359 ret = dmz_check_sb(zmd, 2);
1367 * Initialize a zone descriptor.
1369 static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
1371 struct dmz_metadata *zmd = data;
1372 struct dmz_dev *dev = zmd->nr_devs > 1 ? &zmd->dev[1] : &zmd->dev[0];
1373 int idx = num + dev->zone_offset;
1374 struct dm_zone *zone;
1376 zone = dmz_insert(zmd, idx);
1378 return PTR_ERR(zone);
1380 if (blkz->len != zmd->zone_nr_sectors) {
1381 if (zmd->sb_version > 1) {
 1382 /* Ignore a possible runt (smaller) zone */
1383 set_bit(DMZ_OFFLINE, &zone->flags);
1385 } else if (blkz->start + blkz->len == dev->capacity)
1390 switch (blkz->type) {
1391 case BLK_ZONE_TYPE_CONVENTIONAL:
1392 set_bit(DMZ_RND, &zone->flags);
1394 case BLK_ZONE_TYPE_SEQWRITE_REQ:
1395 case BLK_ZONE_TYPE_SEQWRITE_PREF:
1396 set_bit(DMZ_SEQ, &zone->flags);
1402 if (dmz_is_rnd(zone))
1405 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1407 if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1408 set_bit(DMZ_OFFLINE, &zone->flags);
1409 else if (blkz->cond == BLK_ZONE_COND_READONLY)
1410 set_bit(DMZ_READ_ONLY, &zone->flags);
1412 zmd->nr_useable_zones++;
1413 if (dmz_is_rnd(zone)) {
1414 zmd->nr_rnd_zones++;
1415 if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
1416 /* Primary super block zone */
1417 zmd->sb[0].zone = zone;
1420 if (zmd->nr_devs > 1 && !zmd->sb[2].zone) {
1421 /* Tertiary superblock zone */
1422 zmd->sb[2].zone = zone;
1429 static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
1432 sector_t zone_offset = 0;
1434 for(idx = 0; idx < dev->nr_zones; idx++) {
1435 struct dm_zone *zone;
1437 zone = dmz_insert(zmd, idx);
1439 return PTR_ERR(zone);
1440 set_bit(DMZ_CACHE, &zone->flags);
1442 zmd->nr_cache_zones++;
1443 zmd->nr_useable_zones++;
1444 if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
1445 /* Disable runt zone */
1446 set_bit(DMZ_OFFLINE, &zone->flags);
1449 zone_offset += zmd->zone_nr_sectors;
1455 * Free zones descriptors.
1457 static void dmz_drop_zones(struct dmz_metadata *zmd)
1461 for(idx = 0; idx < zmd->nr_zones; idx++) {
1462 struct dm_zone *zone = xa_load(&zmd->zones, idx);
1465 xa_erase(&zmd->zones, idx);
1467 xa_destroy(&zmd->zones);
1471 * Allocate and initialize zone descriptors using the zone
1472 * information from disk.
1474 static int dmz_init_zones(struct dmz_metadata *zmd)
1477 struct dmz_dev *zoned_dev = &zmd->dev[0];
1480 zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
1481 zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
1482 zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
1483 zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
1484 zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
1485 zmd->zone_nr_bitmap_blocks =
1486 max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
1487 zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
1488 DMZ_BLOCK_SIZE_BITS);
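	/*
	 * With 4KB metadata blocks, a single bitmap block holds 32768 validity
	 * bits, so zones larger than 32768 blocks (128MB) need more than one
	 * bitmap block per zone (zone_nr_bitmap_blocks) and zone_bits_per_mblk
	 * is capped at DMZ_BLOCK_SIZE_BITS.
	 */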
1490 /* Allocate zone array */
1492 for (i = 0; i < zmd->nr_devs; i++)
1493 zmd->nr_zones += zmd->dev[i].nr_zones;
1495 if (!zmd->nr_zones) {
1496 DMERR("(%s): No zones found", zmd->devname);
1499 xa_init(&zmd->zones);
1501 DMDEBUG("(%s): Using %zu B for zone information",
1502 zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
1504 if (zmd->nr_devs > 1) {
1505 ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
1507 DMDEBUG("(%s): Failed to emulate zones, error %d",
1509 dmz_drop_zones(zmd);
1514 * Primary superblock zone is always at zone 0 when multiple
1515 * drives are present.
1517 zmd->sb[0].zone = dmz_get(zmd, 0);
1519 zoned_dev = &zmd->dev[1];
1523 * Get zone information and initialize zone descriptors. At the same
1524 * time, determine where the super block should be: first block of the
1525 * first randomly writable zone.
1527 ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
1528 dmz_init_zone, zmd);
1530 DMDEBUG("(%s): Failed to report zones, error %d",
1532 dmz_drop_zones(zmd);
1539 static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
1542 struct dm_zone *zone = data;
1544 clear_bit(DMZ_OFFLINE, &zone->flags);
1545 clear_bit(DMZ_READ_ONLY, &zone->flags);
1546 if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1547 set_bit(DMZ_OFFLINE, &zone->flags);
1548 else if (blkz->cond == BLK_ZONE_COND_READONLY)
1549 set_bit(DMZ_READ_ONLY, &zone->flags);
1551 if (dmz_is_seq(zone))
1552 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
 1559 * Update a zone's information.
1561 static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1563 struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
1564 unsigned int noio_flag;
1567 if (dev->flags & DMZ_BDEV_REGULAR)
1571 * Get zone information from disk. Since blkdev_report_zones() uses
1572 * GFP_KERNEL by default for memory allocations, set the per-task
1573 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
1574 * GFP_NOIO was specified.
1576 noio_flag = memalloc_noio_save();
1577 ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
1578 dmz_update_zone_cb, zone);
1579 memalloc_noio_restore(noio_flag);
1584 dmz_dev_err(dev, "Get zone %u report failed",
1586 dmz_check_bdev(dev);
1594 * Check a zone write pointer position when the zone is marked
1595 * with the sequential write error flag.
1597 static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
1598 struct dm_zone *zone)
1600 struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
1601 unsigned int wp = 0;
1604 wp = zone->wp_block;
1605 ret = dmz_update_zone(zmd, zone);
1609 dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
1610 zone->id, zone->wp_block, wp);
1612 if (zone->wp_block < wp) {
1613 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1614 wp - zone->wp_block);
1621 * Reset a zone write pointer.
1623 static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1628 * Ignore offline zones, read only zones,
1629 * and conventional zones.
1631 if (dmz_is_offline(zone) ||
1632 dmz_is_readonly(zone) ||
1636 if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1637 struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
1639 ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
1640 dmz_start_sect(zmd, zone),
1641 zmd->zone_nr_sectors, GFP_NOIO);
1643 dmz_dev_err(dev, "Reset zone %u failed %d",
1649 /* Clear write error bit and rewind write pointer position */
1650 clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1656 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1659 * Initialize chunk mapping.
1661 static int dmz_load_mapping(struct dmz_metadata *zmd)
1663 struct dm_zone *dzone, *bzone;
1664 struct dmz_mblock *dmap_mblk = NULL;
1665 struct dmz_map *dmap;
1666 unsigned int i = 0, e = 0, chunk = 0;
1667 unsigned int dzone_id;
1668 unsigned int bzone_id;
1670 /* Metadata block array for the chunk mapping table */
1671 zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
1672 sizeof(struct dmz_mblk *), GFP_KERNEL);
1676 /* Get chunk mapping table blocks and initialize zone mapping */
1677 while (chunk < zmd->nr_chunks) {
1679 /* Get mapping block */
1680 dmap_mblk = dmz_get_mblock(zmd, i + 1);
1681 if (IS_ERR(dmap_mblk))
1682 return PTR_ERR(dmap_mblk);
1683 zmd->map_mblk[i] = dmap_mblk;
1684 dmap = (struct dmz_map *) dmap_mblk->data;
1689 /* Check data zone */
1690 dzone_id = le32_to_cpu(dmap[e].dzone_id);
1691 if (dzone_id == DMZ_MAP_UNMAPPED)
1694 if (dzone_id >= zmd->nr_zones) {
1695 dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1700 dzone = dmz_get(zmd, dzone_id);
1702 dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
1706 set_bit(DMZ_DATA, &dzone->flags);
1707 dzone->chunk = chunk;
1708 dmz_get_zone_weight(zmd, dzone);
1710 if (dmz_is_cache(dzone))
1711 list_add_tail(&dzone->link, &zmd->map_cache_list);
1712 else if (dmz_is_rnd(dzone))
1713 list_add_tail(&dzone->link, &zmd->map_rnd_list);
1715 list_add_tail(&dzone->link, &zmd->map_seq_list);
1717 /* Check buffer zone */
1718 bzone_id = le32_to_cpu(dmap[e].bzone_id);
1719 if (bzone_id == DMZ_MAP_UNMAPPED)
1722 if (bzone_id >= zmd->nr_zones) {
1723 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
1728 bzone = dmz_get(zmd, bzone_id);
1730 dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
1734 if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
1735 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
1740 set_bit(DMZ_DATA, &bzone->flags);
1741 set_bit(DMZ_BUF, &bzone->flags);
1742 bzone->chunk = chunk;
1743 bzone->bzone = dzone;
1744 dzone->bzone = bzone;
1745 dmz_get_zone_weight(zmd, bzone);
1746 if (dmz_is_cache(bzone))
1747 list_add_tail(&bzone->link, &zmd->map_cache_list);
1749 list_add_tail(&bzone->link, &zmd->map_rnd_list);
1753 if (e >= DMZ_MAP_ENTRIES)
1758 * At this point, only meta zones and mapped data zones were
1759 * fully initialized. All remaining zones are unmapped data
1760 * zones. Finish initializing those here.
1762 for (i = 0; i < zmd->nr_zones; i++) {
1763 dzone = dmz_get(zmd, i);
1766 if (dmz_is_meta(dzone))
1768 if (dmz_is_offline(dzone))
1771 if (dmz_is_cache(dzone))
1773 else if (dmz_is_rnd(dzone))
1778 if (dmz_is_data(dzone)) {
1779 /* Already initialized */
1783 /* Unmapped data zone */
1784 set_bit(DMZ_DATA, &dzone->flags);
1785 dzone->chunk = DMZ_MAP_UNMAPPED;
1786 if (dmz_is_cache(dzone)) {
1787 list_add_tail(&dzone->link, &zmd->unmap_cache_list);
1788 atomic_inc(&zmd->unmap_nr_cache);
1789 } else if (dmz_is_rnd(dzone)) {
1790 list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
1791 atomic_inc(&zmd->unmap_nr_rnd);
1792 } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
1793 list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
1794 set_bit(DMZ_RESERVED, &dzone->flags);
1795 atomic_inc(&zmd->nr_reserved_seq_zones);
1798 list_add_tail(&dzone->link, &zmd->unmap_seq_list);
1799 atomic_inc(&zmd->unmap_nr_seq);
1807 * Set a data chunk mapping.
1809 static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
1810 unsigned int dzone_id, unsigned int bzone_id)
1812 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1813 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1814 int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1816 dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
1817 dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
1818 dmz_dirty_mblock(zmd, dmap_mblk);
1822 * The list of mapped zones is maintained in LRU order.
 1823 * This rotates a zone to the end of its map list.
1825 static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1827 if (list_empty(&zone->link))
1830 list_del_init(&zone->link);
1831 if (dmz_is_seq(zone)) {
1832 /* LRU rotate sequential zone */
1833 list_add_tail(&zone->link, &zmd->map_seq_list);
1834 } else if (dmz_is_cache(zone)) {
1835 /* LRU rotate cache zone */
1836 list_add_tail(&zone->link, &zmd->map_cache_list);
1838 /* LRU rotate random zone */
1839 list_add_tail(&zone->link, &zmd->map_rnd_list);
1844 * The list of mapped random zones is maintained
 1845 * in LRU order. This rotates a zone to the end of the list.
1847 static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1849 __dmz_lru_zone(zmd, zone);
1851 __dmz_lru_zone(zmd, zone->bzone);
1855 * Wait for any zone to be freed.
1857 static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
1861 prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
1862 dmz_unlock_map(zmd);
1863 dmz_unlock_metadata(zmd);
1865 io_schedule_timeout(HZ);
1867 dmz_lock_metadata(zmd);
1869 finish_wait(&zmd->free_wq, &wait);
1873 * Lock a zone for reclaim (set the zone RECLAIM bit).
 1874 * Returns false if the zone cannot be locked or if it is already locked, and true otherwise.
1877 int dmz_lock_zone_reclaim(struct dm_zone *zone)
1879 /* Active zones cannot be reclaimed */
1880 if (dmz_is_active(zone))
1883 return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1887 * Clear a zone reclaim flag.
1889 void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1891 WARN_ON(dmz_is_active(zone));
1892 WARN_ON(!dmz_in_reclaim(zone));
1894 clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1895 smp_mb__after_atomic();
1896 wake_up_bit(&zone->flags, DMZ_RECLAIM);
1900 * Wait for a zone reclaim to complete.
1902 static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1904 dmz_unlock_map(zmd);
1905 dmz_unlock_metadata(zmd);
1906 set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1907 wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1908 clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1909 dmz_lock_metadata(zmd);
1914 * Select a cache or random write zone for reclaim.
1916 static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
1919 struct dm_zone *dzone = NULL;
1920 struct dm_zone *zone;
1921 struct list_head *zone_list = &zmd->map_rnd_list;
1923 /* If we have cache zones select from the cache zone list */
1924 if (zmd->nr_cache) {
1925 zone_list = &zmd->map_cache_list;
 1926 /* Try to reclaim random zones, too, when idle */
1927 if (idle && list_empty(zone_list))
1928 zone_list = &zmd->map_rnd_list;
1931 list_for_each_entry(zone, zone_list, link) {
1932 if (dmz_is_buf(zone))
1933 dzone = zone->bzone;
1936 if (dmz_lock_zone_reclaim(dzone))
1944 * Select a buffered sequential zone for reclaim.
1946 static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1948 struct dm_zone *zone;
1950 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1953 if (dmz_lock_zone_reclaim(zone))
1961 * Select a zone for reclaim.
1963 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, bool idle)
1965 struct dm_zone *zone;
1968 * Search for a zone candidate to reclaim: 2 cases are possible.
 1969 * (1) There are no free sequential zones. Then a random data zone
1970 * cannot be reclaimed. So choose a sequential zone to reclaim so
1971 * that afterward a random zone can be reclaimed.
1972 * (2) At least one free sequential zone is available, then choose
1973 * the oldest random zone (data or buffer) that can be locked.
1976 if (list_empty(&zmd->reserved_seq_zones_list))
1977 zone = dmz_get_seq_zone_for_reclaim(zmd);
1979 zone = dmz_get_rnd_zone_for_reclaim(zmd, idle);
1980 dmz_unlock_map(zmd);
1986 * Get the zone mapping a chunk, if the chunk is mapped already.
 1987 * If no mapping exists and the operation is WRITE, a zone is
1988 * allocated and used to map the chunk.
1989 * The zone returned will be set to the active state.
1991 struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
1993 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1994 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1995 int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1996 unsigned int dzone_id;
1997 struct dm_zone *dzone = NULL;
1999 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2003 /* Get the chunk mapping */
2004 dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
2005 if (dzone_id == DMZ_MAP_UNMAPPED) {
 2007 * Reads and discards in unmapped chunks are fine. But for
2008 * writes, we need a mapping, so get one.
2010 if (op != REQ_OP_WRITE)
2013 /* Allocate a random zone */
2014 dzone = dmz_alloc_zone(zmd, alloc_flags);
2016 if (dmz_dev_is_dying(zmd)) {
2017 dzone = ERR_PTR(-EIO);
2020 dmz_wait_for_free_zones(zmd);
2024 dmz_map_zone(zmd, dzone, chunk);
2027 /* The chunk is already mapped: get the mapping zone */
2028 dzone = dmz_get(zmd, dzone_id);
2030 dzone = ERR_PTR(-EIO);
2033 if (dzone->chunk != chunk) {
2034 dzone = ERR_PTR(-EIO);
2038 /* Repair write pointer if the sequential dzone has error */
2039 if (dmz_seq_write_err(dzone)) {
2040 ret = dmz_handle_seq_write_err(zmd, dzone);
2042 dzone = ERR_PTR(-EIO);
2045 clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
2050 * If the zone is being reclaimed, the chunk mapping may change
2051 * to a different zone. So wait for reclaim and retry. Otherwise,
2052 * activate the zone (this will prevent reclaim from touching it).
2054 if (dmz_in_reclaim(dzone)) {
2055 dmz_wait_for_reclaim(zmd, dzone);
2058 dmz_activate_zone(dzone);
2059 dmz_lru_zone(zmd, dzone);
2061 dmz_unlock_map(zmd);
2067 * Write and discard change the block validity of data zones and their buffer
2068 * zones. Check here that valid blocks are still present. If all blocks are
 2069 * invalid, the zones can be unmapped on the fly without waiting for reclaim to do it.
2072 void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
2074 struct dm_zone *bzone;
2078 bzone = dzone->bzone;
2080 if (dmz_weight(bzone))
2081 dmz_lru_zone(zmd, bzone);
2083 /* Empty buffer zone: reclaim it */
2084 dmz_unmap_zone(zmd, bzone);
2085 dmz_free_zone(zmd, bzone);
2090 /* Deactivate the data zone */
2091 dmz_deactivate_zone(dzone);
2092 if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
2093 dmz_lru_zone(zmd, dzone);
2095 /* Unbuffered inactive empty data zone: reclaim it */
2096 dmz_unmap_zone(zmd, dzone);
2097 dmz_free_zone(zmd, dzone);
2100 dmz_unlock_map(zmd);
2104 * Allocate and map a random zone to buffer a chunk
2105 * already mapped to a sequential zone.
2107 struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
2108 struct dm_zone *dzone)
2110 struct dm_zone *bzone;
2111 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2115 bzone = dzone->bzone;
2119 /* Allocate a random zone */
2120 bzone = dmz_alloc_zone(zmd, alloc_flags);
2122 if (dmz_dev_is_dying(zmd)) {
2123 bzone = ERR_PTR(-EIO);
2126 dmz_wait_for_free_zones(zmd);
2130 /* Update the chunk mapping */
2131 dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
2133 set_bit(DMZ_BUF, &bzone->flags);
2134 bzone->chunk = dzone->chunk;
2135 bzone->bzone = dzone;
2136 dzone->bzone = bzone;
2137 if (dmz_is_cache(bzone))
2138 list_add_tail(&bzone->link, &zmd->map_cache_list);
2140 list_add_tail(&bzone->link, &zmd->map_rnd_list);
2142 dmz_unlock_map(zmd);
2148 * Get an unmapped (free) zone.
2149 * This must be called with the mapping lock held.
2151 struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
2153 struct list_head *list;
2154 struct dm_zone *zone;
2156 if (flags & DMZ_ALLOC_CACHE)
2157 list = &zmd->unmap_cache_list;
2158 else if (flags & DMZ_ALLOC_RND)
2159 list = &zmd->unmap_rnd_list;
2161 list = &zmd->unmap_seq_list;
2164 if (list_empty(list)) {
 2166 * No free zone: return NULL if this is not for reclaim.
2168 if (!(flags & DMZ_ALLOC_RECLAIM))
 2171 * Fall back to the reserved sequential zones
2173 zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2174 struct dm_zone, link);
2176 list_del_init(&zone->link);
2177 atomic_dec(&zmd->nr_reserved_seq_zones);
2182 zone = list_first_entry(list, struct dm_zone, link);
2183 list_del_init(&zone->link);
2185 if (dmz_is_cache(zone))
2186 atomic_dec(&zmd->unmap_nr_cache);
2187 else if (dmz_is_rnd(zone))
2188 atomic_dec(&zmd->unmap_nr_rnd);
2190 atomic_dec(&zmd->unmap_nr_seq);
2192 if (dmz_is_offline(zone)) {
2193 dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
2197 if (dmz_is_meta(zone)) {
2198 struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
2200 dmz_dev_warn(dev, "Zone %u has metadata", zone->id);
2209 * This must be called with the mapping lock held.
2211 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2213 /* If this is a sequential zone, reset it */
2214 if (dmz_is_seq(zone))
2215 dmz_reset_zone(zmd, zone);
2217 /* Return the zone to its type unmap list */
2218 if (dmz_is_cache(zone)) {
2219 list_add_tail(&zone->link, &zmd->unmap_cache_list);
2220 atomic_inc(&zmd->unmap_nr_cache);
2221 } else if (dmz_is_rnd(zone)) {
2222 list_add_tail(&zone->link, &zmd->unmap_rnd_list);
2223 atomic_inc(&zmd->unmap_nr_rnd);
2224 } else if (dmz_is_reserved(zone)) {
2225 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
2226 atomic_inc(&zmd->nr_reserved_seq_zones);
2228 list_add_tail(&zone->link, &zmd->unmap_seq_list);
2229 atomic_inc(&zmd->unmap_nr_seq);
2232 wake_up_all(&zmd->free_wq);
2236 * Map a chunk to a zone.
2237 * This must be called with the mapping lock held.
2239 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
2242 /* Set the chunk mapping */
2243 dmz_set_chunk_mapping(zmd, chunk, dzone->id,
2245 dzone->chunk = chunk;
2246 if (dmz_is_cache(dzone))
2247 list_add_tail(&dzone->link, &zmd->map_cache_list);
2248 else if (dmz_is_rnd(dzone))
2249 list_add_tail(&dzone->link, &zmd->map_rnd_list);
2251 list_add_tail(&dzone->link, &zmd->map_seq_list);
2256 * This must be called with the mapping lock held.
2258 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2260 unsigned int chunk = zone->chunk;
2261 unsigned int dzone_id;
2263 if (chunk == DMZ_MAP_UNMAPPED) {
2264 /* Already unmapped */
2268 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
2270 * Unmapping the chunk buffer zone: clear only
2271 * the chunk buffer mapping
2273 dzone_id = zone->bzone->id;
2274 zone->bzone->bzone = NULL;
 2280 * Unmapping the chunk data zone: the zone must not be buffered.
2282 if (WARN_ON(zone->bzone)) {
2283 zone->bzone->bzone = NULL;
2286 dzone_id = DMZ_MAP_UNMAPPED;
2289 dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
2291 zone->chunk = DMZ_MAP_UNMAPPED;
2292 list_del_init(&zone->link);
2296 * Set @nr_bits bits in @bitmap starting from @bit.
2297 * Return the number of bits changed from 0 to 1.
2299 static unsigned int dmz_set_bits(unsigned long *bitmap,
2300 unsigned int bit, unsigned int nr_bits)
2302 unsigned long *addr;
2303 unsigned int end = bit + nr_bits;
2307 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2308 ((end - bit) >= BITS_PER_LONG)) {
2309 /* Try to set the whole word at once */
2310 addr = bitmap + BIT_WORD(bit);
2314 bit += BITS_PER_LONG;
2319 if (!test_and_set_bit(bit, bitmap))
2328 * Get the bitmap block storing the bit for chunk_block in zone.
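 *
 * Bitmap blocks are stored after the super block (1 block) and the
 * nr_map_blocks chunk mapping blocks; zone->id selects the per-zone range of
 * zone_nr_bitmap_blocks blocks, and chunk_block >> DMZ_BLOCK_SHIFT_BITS picks
 * the block within that range.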
2330 static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2331 struct dm_zone *zone,
2332 sector_t chunk_block)
2334 sector_t bitmap_block = 1 + zmd->nr_map_blocks +
2335 (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
2336 (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
2338 return dmz_get_mblock(zmd, bitmap_block);
2342 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
2344 int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2345 struct dm_zone *to_zone)
2347 struct dmz_mblock *from_mblk, *to_mblk;
2348 sector_t chunk_block = 0;
2350 /* Get the zones bitmap blocks */
2351 while (chunk_block < zmd->zone_nr_blocks) {
2352 from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
2353 if (IS_ERR(from_mblk))
2354 return PTR_ERR(from_mblk);
2355 to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
2356 if (IS_ERR(to_mblk)) {
2357 dmz_release_mblock(zmd, from_mblk);
2358 return PTR_ERR(to_mblk);
2361 memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
2362 dmz_dirty_mblock(zmd, to_mblk);
2364 dmz_release_mblock(zmd, to_mblk);
2365 dmz_release_mblock(zmd, from_mblk);
2367 chunk_block += zmd->zone_bits_per_mblk;
2370 to_zone->weight = from_zone->weight;
2376 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
2377 * starting from chunk_block.
2379 int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2380 struct dm_zone *to_zone, sector_t chunk_block)
2382 unsigned int nr_blocks;
2385 /* Get the zones bitmap blocks */
2386 while (chunk_block < zmd->zone_nr_blocks) {
2387 /* Get a valid region from the source zone */
2388 ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2393 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2397 chunk_block += nr_blocks;
2404 * Validate all the blocks in the range [block..block+nr_blocks-1].
2406 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2407 sector_t chunk_block, unsigned int nr_blocks)
2409 unsigned int count, bit, nr_bits;
2410 unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
2411 struct dmz_mblock *mblk;
2414 dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
2415 zone->id, (unsigned long long)chunk_block,
2418 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2421 /* Get bitmap block */
2422 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2424 return PTR_ERR(mblk);
2427 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2428 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2430 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2432 dmz_dirty_mblock(zmd, mblk);
2435 dmz_release_mblock(zmd, mblk);
2437 nr_blocks -= nr_bits;
2438 chunk_block += nr_bits;
2441 if (likely(zone->weight + n <= zone_nr_blocks))
2444 dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
2445 zone->id, zone->weight,
2446 zone_nr_blocks - n);
2447 zone->weight = zone_nr_blocks;
2454 * Clear nr_bits bits in bitmap starting from bit.
2455 * Return the number of bits cleared.
2457 static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
2459 unsigned long *addr;
2460 int end = bit + nr_bits;
2464 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2465 ((end - bit) >= BITS_PER_LONG)) {
2466 /* Try to clear whole word at once */
2467 addr = bitmap + BIT_WORD(bit);
2468 if (*addr == ULONG_MAX) {
2471 bit += BITS_PER_LONG;
2476 if (test_and_clear_bit(bit, bitmap))
2485 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
2487 int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2488 sector_t chunk_block, unsigned int nr_blocks)
2490 unsigned int count, bit, nr_bits;
2491 struct dmz_mblock *mblk;
2494 dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
2495 zone->id, (u64)chunk_block, nr_blocks);
2497 WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2500 /* Get bitmap block */
2501 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2503 return PTR_ERR(mblk);
2506 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2507 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2509 count = dmz_clear_bits((unsigned long *)mblk->data,
2512 dmz_dirty_mblock(zmd, mblk);
2515 dmz_release_mblock(zmd, mblk);
2517 nr_blocks -= nr_bits;
2518 chunk_block += nr_bits;
2521 if (zone->weight >= n)
2524 dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
2525 zone->id, zone->weight, n);
2533 * Get a block bit value.
2535 static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2536 sector_t chunk_block)
2538 struct dmz_mblock *mblk;
2541 WARN_ON(chunk_block >= zmd->zone_nr_blocks);
2543 /* Get bitmap block */
2544 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2546 return PTR_ERR(mblk);
2549 ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
2550 (unsigned long *) mblk->data) != 0;
2552 dmz_release_mblock(zmd, mblk);
2558 * Return the number of blocks from chunk_block to the first block with a bit
2559 * value specified by set. Search at most nr_blocks blocks from chunk_block.
2561 static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2562 sector_t chunk_block, unsigned int nr_blocks,
2565 struct dmz_mblock *mblk;
2566 unsigned int bit, set_bit, nr_bits;
2567 unsigned int zone_bits = zmd->zone_bits_per_mblk;
2568 unsigned long *bitmap;
2571 WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2574 /* Get bitmap block */
2575 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2577 return PTR_ERR(mblk);
2580 bitmap = (unsigned long *) mblk->data;
2581 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2582 nr_bits = min(nr_blocks, zone_bits - bit);
2584 set_bit = find_next_bit(bitmap, zone_bits, bit);
2586 set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
2587 dmz_release_mblock(zmd, mblk);
2590 if (set_bit < zone_bits)
2593 nr_blocks -= nr_bits;
2594 chunk_block += nr_bits;
2601 * Test if chunk_block is valid. If it is, the number of consecutive
2602 * valid blocks from chunk_block will be returned.
2604 int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2605 sector_t chunk_block)
2609 valid = dmz_test_block(zmd, zone, chunk_block);
2613 /* The block is valid: get the number of valid blocks from block */
2614 return dmz_to_next_set_block(zmd, zone, chunk_block,
2615 zmd->zone_nr_blocks - chunk_block, 0);
2619 * Find the first valid block from @chunk_block in @zone.
2620 * If such a block is found, its number is returned using
 2621 * @chunk_block and the total number of valid blocks from @chunk_block is returned.
2624 int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2625 sector_t *chunk_block)
2627 sector_t start_block = *chunk_block;
2628 int ret;
2630 ret = dmz_to_next_set_block(zmd, zone, start_block,
2631 zmd->zone_nr_blocks - start_block, 1);
2632 if (ret < 0)
2633 return ret;
2635 start_block += ret;
2636 *chunk_block = start_block;
2638 return dmz_to_next_set_block(zmd, zone, start_block,
2639 zmd->zone_nr_blocks - start_block, 0);
2640 }
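/*
 * Illustrative sketch (not built with the driver): typical caller pattern
 * for dmz_first_valid_block(), iterating over the valid extents of a zone
 * the way the reclaim code does. The function name is hypothetical.
 */
#if 0
static int dmz_example_iterate_valid(struct dmz_metadata *zmd,
				     struct dm_zone *zone)
{
	sector_t chunk_block = 0;
	int nr_valid;

	while (chunk_block < zmd->zone_nr_blocks) {
		/* Find the next valid extent starting at or after chunk_block */
		nr_valid = dmz_first_valid_block(zmd, zone, &chunk_block);
		if (nr_valid < 0)
			return nr_valid;
		if (!nr_valid)
			break;	/* No more valid blocks in this zone */

		/* ... process nr_valid blocks starting at chunk_block ... */

		chunk_block += nr_valid;
	}
	return 0;
}
#endif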
2643 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
2645 static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
2647 unsigned long *addr;
2648 int end = bit + nr_bits;
2649 int n = 0;
2651 while (bit < end) {
2652 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2653 ((end - bit) >= BITS_PER_LONG)) {
2654 addr = (unsigned long *)bitmap + BIT_WORD(bit);
2655 if (*addr == ULONG_MAX) {
2656 n += BITS_PER_LONG;
2657 bit += BITS_PER_LONG;
2658 continue;
2659 }
2660 }
2662 if (test_bit(bit, bitmap))
2663 n++;
2664 bit++;
2665 }
2667 return n;
2668 }
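/*
 * Illustrative sketch (not built with the driver): dmz_count_bits() above
 * only takes its word-at-a-time shortcut for fully set words. The hedged
 * variant below counts any aligned word at once with hweight_long(); it is
 * shown only to clarify the technique, not as a replacement.
 */
#if 0
static int dmz_example_count_bits(const unsigned long *bitmap, int bit, int nr_bits)
{
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Whole aligned word: count its set bits in one step */
			n += hweight_long(bitmap[BIT_WORD(bit)]);
			bit += BITS_PER_LONG;
			continue;
		}
		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}
	return n;
}
#endif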
2671 * Get a zone weight.
2673 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2675 struct dmz_mblock *mblk;
2676 sector_t chunk_block = 0;
2677 unsigned int bit, nr_bits;
2678 unsigned int nr_blocks = zmd->zone_nr_blocks;
2679 void *bitmap;
2680 int n = 0;
2682 while (nr_blocks) {
2683 /* Get bitmap block */
2684 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2685 if (IS_ERR(mblk)) {
2686 n = 0;
2687 break;
2688 }
2690 /* Count bits in this block */
2691 bitmap = mblk->data;
2692 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2693 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2694 n += dmz_count_bits(bitmap, bit, nr_bits);
2696 dmz_release_mblock(zmd, mblk);
2698 nr_blocks -= nr_bits;
2699 chunk_block += nr_bits;
2700 }
2702 zone->weight = n;
2703 }
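/*
 * Illustrative sketch (not built with the driver): a zone's weight is its
 * number of valid blocks, so it can never exceed the zone size. A hedged
 * helper expressing that invariant (the name is hypothetical; the driver
 * itself only warns when a validate/invalidate operation would break it):
 */
#if 0
static bool dmz_example_weight_sane(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	return zone->weight <= zmd->zone_nr_blocks;
}
#endif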
2706 * Cleanup the zoned metadata resources.
2708 static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2710 struct rb_root *root;
2711 struct dmz_mblock *mblk, *next;
2712 int i;
2714 /* Release zone mapping resources */
2715 if (zmd->map_mblk) {
2716 for (i = 0; i < zmd->nr_map_blocks; i++)
2717 dmz_release_mblock(zmd, zmd->map_mblk[i]);
2718 kfree(zmd->map_mblk);
2719 zmd->map_mblk = NULL;
2722 /* Release super blocks */
2723 for (i = 0; i < 2; i++) {
2724 if (zmd->sb[i].mblk) {
2725 dmz_free_mblock(zmd, zmd->sb[i].mblk);
2726 zmd->sb[i].mblk = NULL;
2730 /* Free cached blocks */
2731 while (!list_empty(&zmd->mblk_dirty_list)) {
2732 mblk = list_first_entry(&zmd->mblk_dirty_list,
2733 struct dmz_mblock, link);
2734 dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
2735 (u64)mblk->no, mblk->ref);
2736 list_del_init(&mblk->link);
2737 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2738 dmz_free_mblock(zmd, mblk);
2741 while (!list_empty(&zmd->mblk_lru_list)) {
2742 mblk = list_first_entry(&zmd->mblk_lru_list,
2743 struct dmz_mblock, link);
2744 list_del_init(&mblk->link);
2745 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2746 dmz_free_mblock(zmd, mblk);
2749 /* Sanity checks: the mblock rbtree should now be empty */
2750 root = &zmd->mblk_rbtree;
2751 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2752 dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
2753 (u64)mblk->no, mblk->ref);
2755 dmz_free_mblock(zmd, mblk);
2758 /* Free the zone descriptors */
2759 dmz_drop_zones(zmd);
2761 mutex_destroy(&zmd->mblk_flush_lock);
2762 mutex_destroy(&zmd->map_lock);
2765 static void dmz_print_dev(struct dmz_metadata *zmd, int num)
2767 struct dmz_dev *dev = &zmd->dev[num];
2769 if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
2770 dmz_dev_info(dev, "Regular block device");
2771 else
2772 dmz_dev_info(dev, "Host-%s zoned block device",
2773 bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
2774 "aware" : "managed");
2775 if (zmd->sb_version > 1) {
2776 sector_t sector_offset =
2777 dev->zone_offset << zmd->zone_nr_sectors_shift;
2779 dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
2780 (u64)dev->capacity, (u64)sector_offset);
2781 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
2782 dev->nr_zones, (u64)zmd->zone_nr_sectors,
2783 (u64)dev->zone_offset);
2784 } else {
2785 dmz_dev_info(dev, " %llu 512-byte logical sectors",
2786 (u64)dev->capacity);
2787 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
2788 dev->nr_zones, (u64)zmd->zone_nr_sectors);
2793 * Initialize the zoned metadata.
2795 int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
2796 struct dmz_metadata **metadata,
2797 const char *devname)
2799 struct dmz_metadata *zmd;
2800 unsigned int i;
2801 struct dm_zone *zone;
2802 int ret;
2804 zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
2805 if (!zmd)
2806 return -ENOMEM;
2808 strcpy(zmd->devname, devname);
2810 zmd->nr_devs = num_dev;
2811 zmd->mblk_rbtree = RB_ROOT;
2812 init_rwsem(&zmd->mblk_sem);
2813 mutex_init(&zmd->mblk_flush_lock);
2814 spin_lock_init(&zmd->mblk_lock);
2815 INIT_LIST_HEAD(&zmd->mblk_lru_list);
2816 INIT_LIST_HEAD(&zmd->mblk_dirty_list);
2818 mutex_init(&zmd->map_lock);
2819 atomic_set(&zmd->unmap_nr_rnd, 0);
2820 INIT_LIST_HEAD(&zmd->unmap_rnd_list);
2821 INIT_LIST_HEAD(&zmd->map_rnd_list);
2823 atomic_set(&zmd->unmap_nr_cache, 0);
2824 INIT_LIST_HEAD(&zmd->unmap_cache_list);
2825 INIT_LIST_HEAD(&zmd->map_cache_list);
2827 atomic_set(&zmd->unmap_nr_seq, 0);
2828 INIT_LIST_HEAD(&zmd->unmap_seq_list);
2829 INIT_LIST_HEAD(&zmd->map_seq_list);
2831 atomic_set(&zmd->nr_reserved_seq_zones, 0);
2832 INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
2834 init_waitqueue_head(&zmd->free_wq);
2836 /* Initialize zone descriptors */
2837 ret = dmz_init_zones(zmd);
2838 if (ret)
2839 goto err;
2841 /* Get super block */
2842 ret = dmz_load_sb(zmd);
2843 if (ret)
2844 goto err;
2846 /* Set metadata zones starting from sb_zone */
2847 for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
2848 zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
2849 if (!zone) {
2850 dmz_zmd_err(zmd,
2851 "metadata zone %u not present", i);
2852 ret = -ENXIO;
2853 goto err;
2854 }
2855 if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
2856 dmz_zmd_err(zmd,
2857 "metadata zone %d is not random", i);
2858 ret = -ENXIO;
2859 goto err;
2860 }
2861 set_bit(DMZ_META, &zone->flags);
2862 }
2863 if (zmd->sb[2].zone) {
2864 zone = dmz_get(zmd, zmd->sb[2].zone->id);
2865 if (!zone) {
2866 dmz_zmd_err(zmd,
2867 "Tertiary metadata zone not present");
2868 ret = -ENXIO;
2869 goto err;
2870 }
2871 set_bit(DMZ_META, &zone->flags);
2872 }
2873 /* Load mapping table */
2874 ret = dmz_load_mapping(zmd);
2875 if (ret)
2876 goto err;
2879 * Cache size boundaries: allow at least 2 super blocks, the chunk map
2880 * blocks and enough blocks to be able to cache the bitmap blocks of
2881 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
2882 * the cache to add 512 more metadata blocks.
2884 zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
2885 zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
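/*
 * Worked example of the sizing above, with assumed (illustrative) values:
 * with 4 KB metadata blocks a bitmap block holds 32768 validity bits, so a
 * 256 MB zone (65536 blocks of 4 KB) needs 2 bitmap blocks. Caching the
 * bitmaps of 16 idle zones then costs 32 blocks, giving
 * min_nr_mblks = 2 + nr_map_blocks + 32 and max_nr_mblks = min_nr_mblks + 512.
 */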
2886 zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
2887 zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
2888 zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
2890 /* Metadata cache shrinker */
2891 ret = register_shrinker(&zmd->mblk_shrinker);
2892 if (ret) {
2893 dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
2894 goto err;
2895 }
2897 dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
2898 for (i = 0; i < zmd->nr_devs; i++)
2899 dmz_print_dev(zmd, i);
2901 dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
2902 zmd->nr_zones, (u64)zmd->zone_nr_sectors);
2903 dmz_zmd_debug(zmd, " %u metadata zones",
2904 zmd->nr_meta_zones * 2);
2905 dmz_zmd_debug(zmd, " %u data zones for %u chunks",
2906 zmd->nr_data_zones, zmd->nr_chunks);
2907 dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
2908 zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
2909 dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
2910 zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
2911 dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
2912 zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
2913 dmz_zmd_debug(zmd, " %u reserved sequential data zones",
2914 zmd->nr_reserved_seq);
2915 dmz_zmd_debug(zmd, "Format:");
2916 dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
2917 zmd->nr_meta_blocks, zmd->max_nr_mblks);
2918 dmz_zmd_debug(zmd, " %u data zone mapping blocks",
2919 zmd->nr_map_blocks);
2920 dmz_zmd_debug(zmd, " %u bitmap blocks",
2921 zmd->nr_bitmap_blocks);
2923 *metadata = zmd;
2925 return 0;
2926 err:
2927 dmz_cleanup_metadata(zmd);
2928 kfree(zmd);
2929 *metadata = NULL;
2931 return ret;
2932 }
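/*
 * Illustrative sketch (not built with the driver): expected pairing of the
 * metadata constructor and destructor from a dm-zoned target, with error
 * handling trimmed. The surrounding function is hypothetical.
 */
#if 0
static int example_target_init(struct dmz_dev *devs, int num_dev,
			       const char *devname)
{
	struct dmz_metadata *zmd;
	int ret;

	ret = dmz_ctr_metadata(devs, num_dev, &zmd, devname);
	if (ret)
		return ret;

	/* ... use zmd; call dmz_resume_metadata(zmd) when the target resumes ... */

	dmz_dtr_metadata(zmd);
	return 0;
}
#endif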
2935 * Cleanup the zoned metadata resources.
2937 void dmz_dtr_metadata(struct dmz_metadata *zmd)
2939 unregister_shrinker(&zmd->mblk_shrinker);
2940 dmz_cleanup_metadata(zmd);
2941 kfree(zmd);
2942 }
2945 * Check zone information on resume.
2947 int dmz_resume_metadata(struct dmz_metadata *zmd)
2949 struct dm_zone *zone;
2950 sector_t wp_block;
2951 unsigned int i;
2952 int ret;
2955 for (i = 0; i < zmd->nr_zones; i++) {
2956 zone = dmz_get(zmd, i);
2957 if (!zone) {
2958 dmz_zmd_err(zmd, "Unable to get zone %u", i);
2959 return -EIO;
2960 }
2961 wp_block = zone->wp_block;
2963 ret = dmz_update_zone(zmd, zone);
2964 if (ret) {
2965 dmz_zmd_err(zmd, "Broken zone %u", i);
2966 return ret;
2967 }
2969 if (dmz_is_offline(zone)) {
2970 dmz_zmd_warn(zmd, "Zone %u is offline", i);
2971 continue;
2972 }
2974 /* Check write pointer */
2975 if (!dmz_is_seq(zone))
2976 zone->wp_block = 0;
2977 else if (zone->wp_block != wp_block) {
2978 dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
2979 i, (u64)zone->wp_block, (u64)wp_block);
2980 zone->wp_block = wp_block;
2981 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
2982 zmd->zone_nr_blocks - zone->wp_block);