2 * Copyright (C) 2011-2012 Red Hat, Inc.
4 * This file is released under the GPL.
7 #include "dm-thin-metadata.h"
8 #include "persistent-data/dm-btree.h"
9 #include "persistent-data/dm-space-map.h"
10 #include "persistent-data/dm-space-map-disk.h"
11 #include "persistent-data/dm-transaction-manager.h"
13 #include <linux/list.h>
14 #include <linux/device-mapper.h>
15 #include <linux/workqueue.h>
17 /*--------------------------------------------------------------------------
18 * As far as the metadata goes, there is:
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
23 * - A space map managing the metadata blocks.
25 * - A space map managing the data blocks.
27 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the
 *   top 40 bits.
 * BTrees consist solely of btree_nodes, each of which fills a block.  Some
 * are internal nodes, so their values are __le64s pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * CPU cache.
42 * Space maps have 2 btrees:
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and holds details such as how many free entries
 *   there are.
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits, with the following meanings:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has one single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata can support data devices that are hundreds of terabytes.
66 * The space maps allocate space linearly from front to back. Space that
67 * is freed in a transaction is never recycled within that transaction.
 * To try to avoid fragmenting _free_ space, the allocator always goes
 * back and fills in gaps.
 * All metadata I/O is done in THIN_METADATA_BLOCK_SIZE sized/aligned
 * chunks from the block manager.
73 *--------------------------------------------------------------------------*/
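/*
 * For example, under the scheme above a data block with ref count 1 is
 * described entirely by the bit pair 01 in its bitmap block, whereas a
 * block referenced 5 times stores the pair 11 (3) in the bitmap and an
 * entry mapping its block_address to 5 in the overflow ref count btree.
 */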
75 #define DM_MSG_PREFIX "thin metadata"
77 #define THIN_SUPERBLOCK_MAGIC 27022010
78 #define THIN_SUPERBLOCK_LOCATION 0
79 #define THIN_VERSION 2
80 #define SECTOR_TO_BLOCK_SHIFT 3
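/*
 * SECTOR_TO_BLOCK_SHIFT converts a size in 512-byte sectors into a size
 * in metadata blocks: 2^3 == 8 sectors (i.e. 4KiB) per block.  See
 * __write_initial_superblock(), which computes metadata_nr_blocks as
 * bdev_size >> SECTOR_TO_BLOCK_SHIFT.
 */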
/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalance 3 child node
 * i.e. we need at most max(3 + 2, 2 + 4) = 6 concurrent locks.
 */
#define THIN_MAX_CONCURRENT_LOCKS 6
92 /* This should be plenty */
93 #define SPACE_MAP_ROOT_SIZE 128
96 * Little endian on-disk superblock and device details.
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/* Root held by userspace transactions. */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/* 2-level btree mapping (dev_id, (dev block, time)) -> data block */
	__le64 data_mapping_root;

	/* Device detail root mapping dev_id -> device_details */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;
138 struct disk_device_details {
139 __le64 mapped_blocks;
140 __le64 transaction_id; /* When created. */
141 __le32 creation_time;
	__le32 snapshotted_time;
} __packed;
145 struct dm_pool_metadata {
146 struct hlist_node hash;
148 struct block_device *bdev;
149 struct dm_block_manager *bm;
150 struct dm_space_map *metadata_sm;
151 struct dm_space_map *data_sm;
152 struct dm_transaction_manager *tm;
153 struct dm_transaction_manager *nb_tm;
157 * First level holds thin_dev_t.
158 * Second level holds mappings.
160 struct dm_btree_info info;
163 * Non-blocking version of the above.
165 struct dm_btree_info nb_info;
168 * Just the top level for deleting whole devices.
170 struct dm_btree_info tl_info;
173 * Just the bottom level for creating new devices.
175 struct dm_btree_info bl_info;
178 * Describes the device details btree.
180 struct dm_btree_info details_info;
	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;
	/*
	 * Reading the space map roots can fail, so we read them into these
	 * buffers before the superblock is locked and updated.
	 */
202 __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
206 struct dm_thin_device {
207 struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
214 uint64_t mapped_blocks;
215 uint64_t transaction_id;
216 uint32_t creation_time;
	uint32_t snapshotted_time;
};
220 /*----------------------------------------------------------------
221 * superblock validator
222 *--------------------------------------------------------------*/
224 #define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
230 struct thin_disk_superblock *disk_super = dm_block_data(b);
232 disk_super->blocknr = cpu_to_le64(dm_block_location(b));
233 disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
234 block_size - sizeof(__le32),
235 SUPERBLOCK_CSUM_XOR));
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
242 struct thin_disk_superblock *disk_super = dm_block_data(b);
245 if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
246 DMERR("sb_check failed: blocknr %llu: "
247 "wanted %llu", le64_to_cpu(disk_super->blocknr),
248 (unsigned long long)dm_block_location(b));
252 if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
253 DMERR("sb_check failed: magic %llu: "
254 "wanted %llu", le64_to_cpu(disk_super->magic),
255 (unsigned long long)THIN_SUPERBLOCK_MAGIC);
259 csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
260 block_size - sizeof(__le32),
261 SUPERBLOCK_CSUM_XOR));
262 if (csum_le != disk_super->csum) {
263 DMERR("sb_check failed: csum %u: wanted %u",
264 le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
277 /*----------------------------------------------------------------
278 * Methods for the btree value types
279 *--------------------------------------------------------------*/
281 static uint64_t pack_block_time(dm_block_t b, uint32_t t)
283 return (b << 24) | t;
static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
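/*
 * Worked example (for illustration): pack_block_time(0x1234, 7) yields
 * (0x1234ULL << 24) | 7 == 0x1234000007; unpack_block_time() then
 * recovers the block with v >> 24 and the time with v & ((1 << 24) - 1).
 */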
292 static void data_block_inc(void *context, const void *value_le)
294 struct dm_space_map *sm = context;
299 memcpy(&v_le, value_le, sizeof(v_le));
300 unpack_block_time(le64_to_cpu(v_le), &b, &t);
301 dm_sm_inc_block(sm, b);
304 static void data_block_dec(void *context, const void *value_le)
306 struct dm_space_map *sm = context;
311 memcpy(&v_le, value_le, sizeof(v_le));
312 unpack_block_time(le64_to_cpu(v_le), &b, &t);
313 dm_sm_dec_block(sm, b);
316 static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
322 memcpy(&v1_le, value1_le, sizeof(v1_le));
323 memcpy(&v2_le, value2_le, sizeof(v2_le));
324 unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
330 static void subtree_inc(void *context, const void *value)
332 struct dm_btree_info *info = context;
336 memcpy(&root_le, value, sizeof(root_le));
337 root = le64_to_cpu(root_le);
338 dm_tm_inc(info->tm, root);
341 static void subtree_dec(void *context, const void *value)
343 struct dm_btree_info *info = context;
347 memcpy(&root_le, value, sizeof(root_le));
348 root = le64_to_cpu(root_le);
349 if (dm_btree_del(info, root))
350 DMERR("btree delete failed");
353 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
356 memcpy(&v1_le, value1_le, sizeof(v1_le));
357 memcpy(&v2_le, value2_le, sizeof(v2_le));
359 return v1_le == v2_le;
362 /*----------------------------------------------------------------*/
364 static int superblock_lock_zero(struct dm_pool_metadata *pmd,
365 struct dm_block **sblock)
367 return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
368 &sb_validator, sblock);
371 static int superblock_lock(struct dm_pool_metadata *pmd,
372 struct dm_block **sblock)
374 return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
375 &sb_validator, sblock);
378 static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
383 __le64 *data_le, zero = cpu_to_le64(0);
384 unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
387 * We can't use a validator here - it may be all zeroes.
389 r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
393 data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
407 static void __setup_btree_details(struct dm_pool_metadata *pmd)
409 pmd->info.tm = pmd->tm;
410 pmd->info.levels = 2;
411 pmd->info.value_type.context = pmd->data_sm;
412 pmd->info.value_type.size = sizeof(__le64);
413 pmd->info.value_type.inc = data_block_inc;
414 pmd->info.value_type.dec = data_block_dec;
415 pmd->info.value_type.equal = data_block_equal;
417 memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
418 pmd->nb_info.tm = pmd->nb_tm;
420 pmd->tl_info.tm = pmd->tm;
421 pmd->tl_info.levels = 1;
422 pmd->tl_info.value_type.context = &pmd->bl_info;
423 pmd->tl_info.value_type.size = sizeof(__le64);
424 pmd->tl_info.value_type.inc = subtree_inc;
425 pmd->tl_info.value_type.dec = subtree_dec;
426 pmd->tl_info.value_type.equal = subtree_equal;
428 pmd->bl_info.tm = pmd->tm;
429 pmd->bl_info.levels = 1;
430 pmd->bl_info.value_type.context = pmd->data_sm;
431 pmd->bl_info.value_type.size = sizeof(__le64);
432 pmd->bl_info.value_type.inc = data_block_inc;
433 pmd->bl_info.value_type.dec = data_block_dec;
434 pmd->bl_info.value_type.equal = data_block_equal;
436 pmd->details_info.tm = pmd->tm;
437 pmd->details_info.levels = 1;
438 pmd->details_info.value_type.context = NULL;
439 pmd->details_info.value_type.size = sizeof(struct disk_device_details);
440 pmd->details_info.value_type.inc = NULL;
441 pmd->details_info.value_type.dec = NULL;
442 pmd->details_info.value_type.equal = NULL;
445 static int save_sm_roots(struct dm_pool_metadata *pmd)
450 r = dm_sm_root_size(pmd->metadata_sm, &len);
454 r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
458 r = dm_sm_root_size(pmd->data_sm, &len);
462 return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
465 static void copy_sm_roots(struct dm_pool_metadata *pmd,
466 struct thin_disk_superblock *disk)
468 memcpy(&disk->metadata_space_map_root,
469 &pmd->metadata_space_map_root,
470 sizeof(pmd->metadata_space_map_root));
472 memcpy(&disk->data_space_map_root,
473 &pmd->data_space_map_root,
474 sizeof(pmd->data_space_map_root));
477 static int __write_initial_superblock(struct dm_pool_metadata *pmd)
480 struct dm_block *sblock;
481 struct thin_disk_superblock *disk_super;
482 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
484 if (bdev_size > THIN_METADATA_MAX_SECTORS)
485 bdev_size = THIN_METADATA_MAX_SECTORS;
487 r = dm_sm_commit(pmd->data_sm);
491 r = dm_tm_pre_commit(pmd->tm);
495 r = save_sm_roots(pmd);
499 r = superblock_lock_zero(pmd, &sblock);
503 disk_super = dm_block_data(sblock);
504 disk_super->flags = 0;
505 memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
506 disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
507 disk_super->version = cpu_to_le32(THIN_VERSION);
508 disk_super->time = 0;
509 disk_super->trans_id = 0;
510 disk_super->held_root = 0;
512 copy_sm_roots(pmd, disk_super);
514 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
515 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
516 disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
517 disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
518 disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
520 return dm_tm_commit(pmd->tm, sblock);
523 static int __format_metadata(struct dm_pool_metadata *pmd)
527 r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
528 &pmd->tm, &pmd->metadata_sm);
530 DMERR("tm_create_with_sm failed");
534 pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
535 if (IS_ERR(pmd->data_sm)) {
536 DMERR("sm_disk_create failed");
537 r = PTR_ERR(pmd->data_sm);
541 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
543 DMERR("could not create non-blocking clone tm");
545 goto bad_cleanup_data_sm;
548 __setup_btree_details(pmd);
550 r = dm_btree_empty(&pmd->info, &pmd->root);
552 goto bad_cleanup_nb_tm;
554 r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
556 DMERR("couldn't create devices root");
557 goto bad_cleanup_nb_tm;
560 r = __write_initial_superblock(pmd);
562 goto bad_cleanup_nb_tm;
567 dm_tm_destroy(pmd->nb_tm);
569 dm_sm_destroy(pmd->data_sm);
571 dm_tm_destroy(pmd->tm);
572 dm_sm_destroy(pmd->metadata_sm);
577 static int __check_incompat_features(struct thin_disk_superblock *disk_super,
578 struct dm_pool_metadata *pmd)
582 features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
584 DMERR("could not access metadata due to unsupported optional features (%lx).",
585 (unsigned long)features);
590 * Check for read-only metadata to skip the following RDWR checks.
592 if (get_disk_ro(pmd->bdev->bd_disk))
595 features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
597 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
598 (unsigned long)features);
605 static int __open_metadata(struct dm_pool_metadata *pmd)
608 struct dm_block *sblock;
609 struct thin_disk_superblock *disk_super;
611 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
612 &sb_validator, &sblock);
614 DMERR("couldn't read superblock");
618 disk_super = dm_block_data(sblock);
620 /* Verify the data block size hasn't changed */
621 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
622 DMERR("changing the data block size (from %u to %llu) is not supported",
623 le32_to_cpu(disk_super->data_block_size),
624 (unsigned long long)pmd->data_block_size);
626 goto bad_unlock_sblock;
629 r = __check_incompat_features(disk_super, pmd);
631 goto bad_unlock_sblock;
633 r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
634 disk_super->metadata_space_map_root,
635 sizeof(disk_super->metadata_space_map_root),
636 &pmd->tm, &pmd->metadata_sm);
638 DMERR("tm_open_with_sm failed");
639 goto bad_unlock_sblock;
642 pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
643 sizeof(disk_super->data_space_map_root));
644 if (IS_ERR(pmd->data_sm)) {
645 DMERR("sm_disk_open failed");
646 r = PTR_ERR(pmd->data_sm);
650 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
652 DMERR("could not create non-blocking clone tm");
654 goto bad_cleanup_data_sm;
657 __setup_btree_details(pmd);
658 dm_bm_unlock(sblock);
663 dm_sm_destroy(pmd->data_sm);
665 dm_tm_destroy(pmd->tm);
666 dm_sm_destroy(pmd->metadata_sm);
668 dm_bm_unlock(sblock);
673 static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
	int r, unformatted;

	r = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);
687 static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
691 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
692 THIN_MAX_CONCURRENT_LOCKS);
693 if (IS_ERR(pmd->bm)) {
694 DMERR("could not create block manager");
695 return PTR_ERR(pmd->bm);
698 r = __open_or_format_metadata(pmd, format_device);
700 dm_block_manager_destroy(pmd->bm);
705 static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
707 dm_sm_destroy(pmd->data_sm);
708 dm_sm_destroy(pmd->metadata_sm);
709 dm_tm_destroy(pmd->nb_tm);
710 dm_tm_destroy(pmd->tm);
711 dm_block_manager_destroy(pmd->bm);
714 static int __begin_transaction(struct dm_pool_metadata *pmd)
717 struct thin_disk_superblock *disk_super;
718 struct dm_block *sblock;
721 * We re-read the superblock every time. Shouldn't need to do this
724 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
725 &sb_validator, &sblock);
729 disk_super = dm_block_data(sblock);
730 pmd->time = le32_to_cpu(disk_super->time);
731 pmd->root = le64_to_cpu(disk_super->data_mapping_root);
732 pmd->details_root = le64_to_cpu(disk_super->device_details_root);
733 pmd->trans_id = le64_to_cpu(disk_super->trans_id);
734 pmd->flags = le32_to_cpu(disk_super->flags);
735 pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
737 dm_bm_unlock(sblock);
741 static int __write_changed_details(struct dm_pool_metadata *pmd)
744 struct dm_thin_device *td, *tmp;
745 struct disk_device_details details;
748 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
754 details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
755 details.transaction_id = cpu_to_le64(td->transaction_id);
756 details.creation_time = cpu_to_le32(td->creation_time);
757 details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
758 __dm_bless_for_disk(&details);
760 r = dm_btree_insert(&pmd->details_info, pmd->details_root,
761 &key, &details, &pmd->details_root);
776 static int __commit_transaction(struct dm_pool_metadata *pmd)
779 struct thin_disk_superblock *disk_super;
780 struct dm_block *sblock;
783 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
785 BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
787 r = __write_changed_details(pmd);
791 r = dm_sm_commit(pmd->data_sm);
795 r = dm_tm_pre_commit(pmd->tm);
799 r = save_sm_roots(pmd);
803 r = superblock_lock(pmd, &sblock);
807 disk_super = dm_block_data(sblock);
808 disk_super->time = cpu_to_le32(pmd->time);
809 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
810 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
811 disk_super->trans_id = cpu_to_le64(pmd->trans_id);
812 disk_super->flags = cpu_to_le32(pmd->flags);
814 copy_sm_roots(pmd, disk_super);
816 return dm_tm_commit(pmd->tm, sblock);
819 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;
826 pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
828 DMERR("could not allocate metadata struct");
829 return ERR_PTR(-ENOMEM);
832 init_rwsem(&pmd->root_lock);
834 INIT_LIST_HEAD(&pmd->thin_devices);
835 pmd->fail_io = false;
837 pmd->data_block_size = data_block_size;
839 r = __create_persistent_data_objects(pmd, format_device);
845 r = __begin_transaction(pmd);
847 if (dm_pool_metadata_close(pmd) < 0)
848 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
855 int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
858 unsigned open_devices = 0;
859 struct dm_thin_device *td, *tmp;
861 down_read(&pmd->root_lock);
862 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
870 up_read(&pmd->root_lock);
873 DMERR("attempt to close pmd when %u device(s) are still open",
878 if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
879 r = __commit_transaction(pmd);
881 DMWARN("%s: __commit_transaction() failed, error = %d",
886 __destroy_persistent_data_objects(pmd);
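/*
 * A minimal lifecycle sketch for callers such as the pool target (error
 * handling elided; format_device = true formats a fresh metadata device):
 *
 *	struct dm_pool_metadata *pmd;
 *
 *	pmd = dm_pool_metadata_open(bdev, data_block_size, true);
 *	if (IS_ERR(pmd))
 *		return PTR_ERR(pmd);
 *	...
 *	dm_pool_commit_metadata(pmd);
 *	dm_pool_metadata_close(pmd);
 */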
893 * __open_device: Returns @td corresponding to device with id @dev,
894 * creating it if @create is set and incrementing @td->open_count.
895 * On failure, @td is undefined.
897 static int __open_device(struct dm_pool_metadata *pmd,
898 dm_thin_id dev, int create,
899 struct dm_thin_device **td)
902 struct dm_thin_device *td2;
904 struct disk_device_details details_le;
907 * If the device is already open, return it.
909 list_for_each_entry(td2, &pmd->thin_devices, list)
910 if (td2->id == dev) {
912 * May not create an already-open device.
923 * Check the device exists.
925 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/* Create new device. */
		changed = 1;
935 details_le.mapped_blocks = 0;
936 details_le.transaction_id = cpu_to_le64(pmd->trans_id);
937 details_le.creation_time = cpu_to_le32(pmd->time);
938 details_le.snapshotted_time = cpu_to_le32(pmd->time);
941 *td = kmalloc(sizeof(**td), GFP_NOIO);
947 (*td)->open_count = 1;
948 (*td)->changed = changed;
949 (*td)->aborted_with_changes = false;
950 (*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
951 (*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
952 (*td)->creation_time = le32_to_cpu(details_le.creation_time);
953 (*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
955 list_add(&(*td)->list, &pmd->thin_devices);
960 static void __close_device(struct dm_thin_device *td)
965 static int __create_thin(struct dm_pool_metadata *pmd,
971 struct disk_device_details details_le;
972 struct dm_thin_device *td;
975 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
981 * Create an empty btree for the mappings.
983 r = dm_btree_empty(&pmd->bl_info, &dev_root);
988 * Insert it into the main mapping tree.
990 value = cpu_to_le64(dev_root);
991 __dm_bless_for_disk(&value);
992 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
994 dm_btree_del(&pmd->bl_info, dev_root);
998 r = __open_device(pmd, dev, 1, &td);
1000 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1001 dm_btree_del(&pmd->bl_info, dev_root);
1009 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
1013 down_write(&pmd->root_lock);
1015 r = __create_thin(pmd, dev);
1016 up_write(&pmd->root_lock);
1021 static int __set_snapshot_details(struct dm_pool_metadata *pmd,
1022 struct dm_thin_device *snap,
1023 dm_thin_id origin, uint32_t time)
1026 struct dm_thin_device *td;
1028 r = __open_device(pmd, origin, 0, &td);
1033 td->snapshotted_time = time;
1035 snap->mapped_blocks = td->mapped_blocks;
1036 snap->snapshotted_time = time;
1042 static int __create_snap(struct dm_pool_metadata *pmd,
1043 dm_thin_id dev, dm_thin_id origin)
1046 dm_block_t origin_root;
1047 uint64_t key = origin, dev_key = dev;
1048 struct dm_thin_device *td;
1049 struct disk_device_details details_le;
1052 /* check this device is unused */
1053 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1054 &dev_key, &details_le);
1058 /* find the mapping tree for the origin */
1059 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
1062 origin_root = le64_to_cpu(value);
1064 /* clone the origin, an inc will do */
1065 dm_tm_inc(pmd->tm, origin_root);
1067 /* insert into the main mapping tree */
1068 value = cpu_to_le64(origin_root);
1069 __dm_bless_for_disk(&value);
1071 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1073 dm_tm_dec(pmd->tm, origin_root);
1079 r = __open_device(pmd, dev, 1, &td);
1083 r = __set_snapshot_details(pmd, td, origin, pmd->time);
1092 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1093 dm_btree_remove(&pmd->details_info, pmd->details_root,
1094 &key, &pmd->details_root);
1098 int dm_pool_create_snap(struct dm_pool_metadata *pmd,
1104 down_write(&pmd->root_lock);
1106 r = __create_snap(pmd, dev, origin);
1107 up_write(&pmd->root_lock);
1112 static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
1116 struct dm_thin_device *td;
1118 /* TODO: failure should mark the transaction invalid */
1119 r = __open_device(pmd, dev, 0, &td);
1123 if (td->open_count > 1) {
1128 list_del(&td->list);
1130 r = dm_btree_remove(&pmd->details_info, pmd->details_root,
1131 &key, &pmd->details_root);
1135 r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1142 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
1147 down_write(&pmd->root_lock);
1149 r = __delete_device(pmd, dev);
1150 up_write(&pmd->root_lock);
1155 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
1156 uint64_t current_id,
1161 down_write(&pmd->root_lock);
1166 if (pmd->trans_id != current_id) {
1167 DMERR("mismatched transaction id");
1171 pmd->trans_id = new_id;
1175 up_write(&pmd->root_lock);
1180 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1185 down_read(&pmd->root_lock);
1186 if (!pmd->fail_io) {
1187 *result = pmd->trans_id;
1190 up_read(&pmd->root_lock);
1195 static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1198 struct thin_disk_superblock *disk_super;
1199 struct dm_block *copy, *sblock;
1200 dm_block_t held_root;
1203 * We commit to ensure the btree roots which we increment in a
1204 * moment are up to date.
1206 __commit_transaction(pmd);
1209 * Copy the superblock.
1211 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1212 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
1219 held_root = dm_block_location(copy);
1220 disk_super = dm_block_data(copy);
1222 if (le64_to_cpu(disk_super->held_root)) {
1223 DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1225 dm_tm_dec(pmd->tm, held_root);
1226 dm_tm_unlock(pmd->tm, copy);
1231 * Wipe the spacemap since we're not publishing this.
1233 memset(&disk_super->data_space_map_root, 0,
1234 sizeof(disk_super->data_space_map_root));
1235 memset(&disk_super->metadata_space_map_root, 0,
1236 sizeof(disk_super->metadata_space_map_root));
1239 * Increment the data structures that need to be preserved.
1241 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1242 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1243 dm_tm_unlock(pmd->tm, copy);
1246 * Write the held root into the superblock.
1248 r = superblock_lock(pmd, &sblock);
1250 dm_tm_dec(pmd->tm, held_root);
1254 disk_super = dm_block_data(sblock);
1255 disk_super->held_root = cpu_to_le64(held_root);
1256 dm_bm_unlock(sblock);
1260 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1264 down_write(&pmd->root_lock);
1266 r = __reserve_metadata_snap(pmd);
1267 up_write(&pmd->root_lock);
1272 static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1275 struct thin_disk_superblock *disk_super;
1276 struct dm_block *sblock, *copy;
1277 dm_block_t held_root;
1279 r = superblock_lock(pmd, &sblock);
1283 disk_super = dm_block_data(sblock);
1284 held_root = le64_to_cpu(disk_super->held_root);
1285 disk_super->held_root = cpu_to_le64(0);
1287 dm_bm_unlock(sblock);
1290 DMWARN("No pool metadata snapshot found: nothing to release.");
	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
1298 disk_super = dm_block_data(copy);
1299 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1300 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1301 dm_sm_dec_block(pmd->metadata_sm, held_root);
1303 dm_tm_unlock(pmd->tm, copy);
1308 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1312 down_write(&pmd->root_lock);
1314 r = __release_metadata_snap(pmd);
1315 up_write(&pmd->root_lock);
1320 static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1324 struct thin_disk_superblock *disk_super;
1325 struct dm_block *sblock;
1327 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1328 &sb_validator, &sblock);
1332 disk_super = dm_block_data(sblock);
1333 *result = le64_to_cpu(disk_super->held_root);
1335 dm_bm_unlock(sblock);
1340 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1345 down_read(&pmd->root_lock);
1347 r = __get_metadata_snap(pmd, result);
1348 up_read(&pmd->root_lock);
1353 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
1354 struct dm_thin_device **td)
1358 down_write(&pmd->root_lock);
1360 r = __open_device(pmd, dev, 0, td);
1361 up_write(&pmd->root_lock);
1366 int dm_pool_close_thin_device(struct dm_thin_device *td)
1368 down_write(&td->pmd->root_lock);
1370 up_write(&td->pmd->root_lock);
1375 dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
1381 * Check whether @time (of block creation) is older than @td's last snapshot.
1382 * If so then the associated block is shared with the last snapshot device.
1383 * Any block on a device created *after* the device last got snapshotted is
1384 * necessarily not shared.
1386 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1388 return td->snapshotted_time > time;
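/*
 * For example, if a device was last snapshotted at time 5, a block
 * created at time 3 predates that snapshot and is therefore shared with
 * it, whereas a block created at time 5 or later is not.
 */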
1391 static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
1392 struct dm_thin_lookup_result *result)
1394 uint64_t block_time = 0;
1395 dm_block_t exception_block;
1396 uint32_t exception_time;
1398 block_time = le64_to_cpu(value);
1399 unpack_block_time(block_time, &exception_block, &exception_time);
1400 result->block = exception_block;
1401 result->shared = __snapshotted_since(td, exception_time);
1404 static int __find_block(struct dm_thin_device *td, dm_block_t block,
1405 int can_issue_io, struct dm_thin_lookup_result *result)
1409 struct dm_pool_metadata *pmd = td->pmd;
1410 dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

	if (can_issue_io)
		info = &pmd->info;
	else
		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
1425 int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
1426 int can_issue_io, struct dm_thin_lookup_result *result)
1429 struct dm_pool_metadata *pmd = td->pmd;
	down_read(&pmd->root_lock);
	if (pmd->fail_io) {
		up_read(&pmd->root_lock);
		return -EINVAL;
	}

	r = __find_block(td, block, can_issue_io, result);
	up_read(&pmd->root_lock);

	return r;
static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
1449 struct dm_pool_metadata *pmd = td->pmd;
1450 dm_block_t keys[2] = { td->id, block };
1452 r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
1454 unpack_lookup_result(td, value, result);
1459 static int __find_mapped_range(struct dm_thin_device *td,
1460 dm_block_t begin, dm_block_t end,
1461 dm_block_t *thin_begin, dm_block_t *thin_end,
1462 dm_block_t *pool_begin, bool *maybe_shared)
1465 dm_block_t pool_end;
1466 struct dm_thin_lookup_result lookup;
1471 r = __find_next_mapped_block(td, begin, &begin, &lookup);
1478 *thin_begin = begin;
1479 *pool_begin = lookup.block;
1480 *maybe_shared = lookup.shared;
1483 pool_end = *pool_begin + 1;
1484 while (begin != end) {
1485 r = __find_block(td, begin, true, &lookup);
1493 if ((lookup.block != pool_end) ||
1494 (lookup.shared != *maybe_shared))
1505 int dm_thin_find_mapped_range(struct dm_thin_device *td,
1506 dm_block_t begin, dm_block_t end,
1507 dm_block_t *thin_begin, dm_block_t *thin_end,
1508 dm_block_t *pool_begin, bool *maybe_shared)
1511 struct dm_pool_metadata *pmd = td->pmd;
1513 down_read(&pmd->root_lock);
1514 if (!pmd->fail_io) {
1515 r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
1516 pool_begin, maybe_shared);
1518 up_read(&pmd->root_lock);
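/*
 * Sketch of walking every mapped range of a thin device (nr_blocks is
 * assumed to be the device's size in virtual blocks; error handling
 * elided):
 *
 *	dm_block_t begin = 0, t_begin, t_end, p_begin;
 *	bool shared;
 *
 *	while (!dm_thin_find_mapped_range(td, begin, nr_blocks, &t_begin,
 *					  &t_end, &p_begin, &shared))
 *		begin = t_end;
 */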
1523 static int __insert(struct dm_thin_device *td, dm_block_t block,
1524 dm_block_t data_block)
1528 struct dm_pool_metadata *pmd = td->pmd;
1529 dm_block_t keys[2] = { td->id, block };
1531 value = cpu_to_le64(pack_block_time(data_block, pmd->time));
1532 __dm_bless_for_disk(&value);
1534 r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
1535 &pmd->root, &inserted);
	if (r)
		return r;

	td->changed = true;
	if (inserted)
		td->mapped_blocks++;
1546 int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
1547 dm_block_t data_block)
1551 down_write(&td->pmd->root_lock);
1552 if (!td->pmd->fail_io)
1553 r = __insert(td, block, data_block);
1554 up_write(&td->pmd->root_lock);
1559 static int __remove(struct dm_thin_device *td, dm_block_t block)
1562 struct dm_pool_metadata *pmd = td->pmd;
1563 dm_block_t keys[2] = { td->id, block };
1565 r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
1569 td->mapped_blocks--;
1575 static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1578 unsigned count, total_count = 0;
1579 struct dm_pool_metadata *pmd = td->pmd;
1580 dm_block_t keys[1] = { td->id };
1582 dm_block_t mapping_root;
1585 * Find the mapping tree
1587 r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
1592 * Remove from the mapping tree, taking care to inc the
1593 * ref count so it doesn't get deleted.
1595 mapping_root = le64_to_cpu(value);
1596 dm_tm_inc(pmd->tm, mapping_root);
1597 r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
	 * dm_btree_remove_leaves() stops at the first unmapped entry, so we
	 * have to loop round finding mapped ranges.
1605 while (begin < end) {
1606 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1616 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1620 total_count += count;
1623 td->mapped_blocks -= total_count;
1627 * Reinsert the mapping tree.
1629 value = cpu_to_le64(mapping_root);
1630 __dm_bless_for_disk(&value);
1631 return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
1634 int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
1638 down_write(&td->pmd->root_lock);
1639 if (!td->pmd->fail_io)
1640 r = __remove(td, block);
1641 up_write(&td->pmd->root_lock);
1646 int dm_thin_remove_range(struct dm_thin_device *td,
1647 dm_block_t begin, dm_block_t end)
1651 down_write(&td->pmd->root_lock);
1652 if (!td->pmd->fail_io)
1653 r = __remove_range(td, begin, end);
1654 up_write(&td->pmd->root_lock);
1659 int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1664 down_read(&pmd->root_lock);
1665 r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1667 *result = (ref_count != 0);
1668 up_read(&pmd->root_lock);
1673 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1677 down_write(&pmd->root_lock);
1678 for (; b != e; b++) {
1679 r = dm_sm_inc_block(pmd->data_sm, b);
1683 up_write(&pmd->root_lock);
1688 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1692 down_write(&pmd->root_lock);
1693 for (; b != e; b++) {
1694 r = dm_sm_dec_block(pmd->data_sm, b);
1698 up_write(&pmd->root_lock);
1703 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1707 down_read(&td->pmd->root_lock);
1709 up_read(&td->pmd->root_lock);
1714 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1717 struct dm_thin_device *td, *tmp;
1719 down_read(&pmd->root_lock);
1720 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1726 up_read(&pmd->root_lock);
1731 bool dm_thin_aborted_changes(struct dm_thin_device *td)
1735 down_read(&td->pmd->root_lock);
1736 r = td->aborted_with_changes;
1737 up_read(&td->pmd->root_lock);
1742 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
1746 down_write(&pmd->root_lock);
1748 r = dm_sm_new_block(pmd->data_sm, result);
1749 up_write(&pmd->root_lock);
1754 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
1758 down_write(&pmd->root_lock);
1762 r = __commit_transaction(pmd);
1767 * Open the next transaction.
1769 r = __begin_transaction(pmd);
1771 up_write(&pmd->root_lock);
1775 static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
1777 struct dm_thin_device *td;
1779 list_for_each_entry(td, &pmd->thin_devices, list)
1780 td->aborted_with_changes = td->changed;
1783 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
1787 down_write(&pmd->root_lock);
1791 __set_abort_with_changes_flags(pmd);
1792 __destroy_persistent_data_objects(pmd);
1793 r = __create_persistent_data_objects(pmd, false);
	if (r)
		pmd->fail_io = true;
1798 up_write(&pmd->root_lock);
1803 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
1807 down_read(&pmd->root_lock);
1809 r = dm_sm_get_nr_free(pmd->data_sm, result);
1810 up_read(&pmd->root_lock);
1815 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1820 down_read(&pmd->root_lock);
1822 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1823 up_read(&pmd->root_lock);
1828 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1833 down_read(&pmd->root_lock);
1835 r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
1836 up_read(&pmd->root_lock);
1841 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
1845 down_read(&pmd->root_lock);
1847 r = dm_sm_get_nr_blocks(pmd->data_sm, result);
1848 up_read(&pmd->root_lock);
1853 int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
1856 struct dm_pool_metadata *pmd = td->pmd;
1858 down_read(&pmd->root_lock);
1859 if (!pmd->fail_io) {
1860 *result = td->mapped_blocks;
1863 up_read(&pmd->root_lock);
1868 static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
1872 dm_block_t thin_root;
1873 struct dm_pool_metadata *pmd = td->pmd;
1875 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
1879 thin_root = le64_to_cpu(value_le);
1881 return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
1884 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1888 struct dm_pool_metadata *pmd = td->pmd;
1890 down_read(&pmd->root_lock);
1892 r = __highest_block(td, result);
1893 up_read(&pmd->root_lock);
1898 static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
1901 dm_block_t old_count;
1903 r = dm_sm_get_nr_blocks(sm, &old_count);
1907 if (new_count == old_count)
1910 if (new_count < old_count) {
1911 DMERR("cannot reduce size of space map");
1915 return dm_sm_extend(sm, new_count - old_count);
1918 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1922 down_write(&pmd->root_lock);
1924 r = __resize_space_map(pmd->data_sm, new_count);
1925 up_write(&pmd->root_lock);
1930 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1934 down_write(&pmd->root_lock);
1936 r = __resize_space_map(pmd->metadata_sm, new_count);
1937 up_write(&pmd->root_lock);
1942 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
1944 down_write(&pmd->root_lock);
1945 dm_bm_set_read_only(pmd->bm);
1946 up_write(&pmd->root_lock);
1949 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
1951 down_write(&pmd->root_lock);
1952 dm_bm_set_read_write(pmd->bm);
1953 up_write(&pmd->root_lock);
1956 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1957 dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
1963 down_write(&pmd->root_lock);
1964 r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
1965 up_write(&pmd->root_lock);
1970 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
1973 struct dm_block *sblock;
1974 struct thin_disk_superblock *disk_super;
1976 down_write(&pmd->root_lock);
1977 pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
1979 r = superblock_lock(pmd, &sblock);
		DMERR("couldn't lock superblock");
1985 disk_super = dm_block_data(sblock);
1986 disk_super->flags = cpu_to_le32(pmd->flags);
1988 dm_bm_unlock(sblock);
1990 up_write(&pmd->root_lock);
1994 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
1998 down_read(&pmd->root_lock);
1999 needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
2000 up_read(&pmd->root_lock);
2005 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
2007 down_read(&pmd->root_lock);
2009 dm_tm_issue_prefetches(pmd->tm);
2010 up_read(&pmd->root_lock);