// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"
#include "dm-btree-internal.h"
#include "dm-persistent-data-internal.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/
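/*
 * Index validator.
 */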
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};
/*----------------------------------------------------------------*/
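/*
 * Bitmap validator.
 */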
#define BITMAP_CSUM_XOR 240779

static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};
/*----------------------------------------------------------------*/
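/*
 * On-disk bitmaps hold one 2-bit entry per data block: values 0-2 are
 * literal reference counts, and 3 means the real count lives in the
 * overflow btree (ref_count_root).  Entries are packed 32 to a
 * little-endian 64-bit word, with the high bit of each entry stored
 * first, so e.g. a count of 2 is held as binary 10.
 */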
#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT	5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}
#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
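/*
 * Returns true iff every 2-bit entry in the 64-bit word containing entry b
 * is non-zero, i.e. the whole word is in use.  WORD_MASK_HIGH selects the
 * high bit of each entry; sm_find_free() uses this to skip full words.
 */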
static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}
static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned int hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}
static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}
static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
			unsigned int *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}
/*----------------------------------------------------------------*/
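/*
 * sm_ll_init() sets up the fields common to both space map flavours: the
 * btree value types for the index entries and the overflow reference
 * counts, the block size, and the number of 2-bit entries that fit in a
 * single bitmap block.
 */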
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
				ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}
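/*
 * Extends the space map by extra_blocks, allocating a fresh bitmap block
 * for each additional index entry needed and recording it with save_ie().
 */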
int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned int old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}
int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	if (b >= ll->nr_blocks) {
		DMERR_LIMIT("metadata block out of bounds");
		return -EINVAL;
	}

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}
static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);
	return r;
}
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);
	if (end == 0)
		end = ll->entries_per_block;

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned int position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}
/*----------------------------------------------------------------*/
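/*
 * Sets the reference count of block b to an absolute value, updating the
 * bitmap (and the overflow btree for counts above 2) and the index entry.
 * *nr_allocations is set to +1, -1 or 0 depending on whether the block
 * went from free to used, used to free, or stayed in the same state.
 */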
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, int32_t *nr_allocations)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
	bm_le = dm_bitmap_data(nb);

	old = sm_lookup_bitmap(bm_le, bit);
	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);
		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*nr_allocations = 1;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*nr_allocations = -1;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*nr_allocations = 0;

	return ll->save_ie(ll, index, &ie_disk);
}
/*----------------------------------------------------------------*/
/*
 * Holds useful intermediate results for the range based inc and dec
 * operations.
 */
struct inc_context {
	struct disk_index_entry ie_disk;
	struct dm_block *bitmap_block;
	void *bitmap;

	struct dm_block *overflow_leaf;
};
static inline void init_inc_context(struct inc_context *ic)
{
	ic->bitmap_block = NULL;
	ic->bitmap = NULL;
	ic->overflow_leaf = NULL;
}
static inline void exit_inc_context(struct ll_disk *ll, struct inc_context *ic)
{
	if (ic->bitmap_block)
		dm_tm_unlock(ll->tm, ic->bitmap_block);
	if (ic->overflow_leaf)
		dm_tm_unlock(ll->tm, ic->overflow_leaf);
}
static inline void reset_inc_context(struct ll_disk *ll, struct inc_context *ic)
{
	exit_inc_context(ll, ic);
	init_inc_context(ic);
}
/*
 * Confirms a btree node contains a particular key at an index.
 */
static bool contains_key(struct btree_node *n, uint64_t key, int index)
{
	return index >= 0 &&
		index < le32_to_cpu(n->header.nr_entries) &&
		le64_to_cpu(n->keys[index]) == key;
}
static int __sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_context *ic)
{
	int r;
	int index;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	/*
	 * bitmap_block needs to be unlocked because getting the
	 * overflow_leaf may need to allocate, and thus use the space map.
	 */
	reset_inc_context(ll, ic);

	r = btree_get_overwrite_leaf(&ll->ref_count_info, ll->ref_count_root,
				     b, &index, &ll->ref_count_root, &ic->overflow_leaf);
	if (r < 0)
		return r;

	n = dm_block_data(ic->overflow_leaf);

	if (!contains_key(n, b, index)) {
		DMERR("overflow btree is missing an entry");
		return -EINVAL;
	}

	v_ptr = value_ptr(n, index);
	rc = le32_to_cpu(*v_ptr) + 1;
	*v_ptr = cpu_to_le32(rc);

	return 0;
}
static int sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_context *ic)
{
	int index;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	/*
	 * Do we already have the correct overflow leaf?
	 */
	if (ic->overflow_leaf) {
		n = dm_block_data(ic->overflow_leaf);
		index = lower_bound(n, b);
		if (contains_key(n, b, index)) {
			v_ptr = value_ptr(n, index);
			rc = le32_to_cpu(*v_ptr) + 1;
			*v_ptr = cpu_to_le32(rc);

			return 0;
		}
	}

	return __sm_ll_inc_overflow(ll, b, ic);
}
static inline int shadow_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
	int r, inc;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ic->ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &ic->bitmap_block, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ic->ie_disk.blocknr = cpu_to_le64(dm_block_location(ic->bitmap_block));
	ic->bitmap = dm_bitmap_data(ic->bitmap_block);
	return 0;
}
/*
 * Once shadow_bitmap has been called, which always happens at the start of
 * inc/dec, we can reopen the bitmap with a simple write lock, rather than
 * re-calling dm_tm_shadow_block().
 */
static inline int ensure_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
	if (!ic->bitmap_block) {
		int r = dm_bm_write_lock(dm_tm_get_bm(ll->tm), le64_to_cpu(ic->ie_disk.blocknr),
					 &dm_sm_bitmap_validator, &ic->bitmap_block);
		if (r) {
			DMERR("unable to re-get write lock for bitmap");
			return r;
		}
		ic->bitmap = dm_bitmap_data(ic->bitmap_block);
	}

	return 0;
}
/*
 * Loops round incrementing entries in a single bitmap.
 */
static inline int sm_ll_inc_bitmap(struct ll_disk *ll, dm_block_t b,
				   uint32_t bit, uint32_t bit_end,
				   int32_t *nr_allocations, dm_block_t *new_b,
				   struct inc_context *ic)
{
	int r;
	__le32 le_rc;
	uint32_t old;

	for (; bit != bit_end; bit++, b++) {
		/*
		 * We only need to drop the bitmap if we need to find a new btree
		 * leaf for the overflow.  So if it was dropped last iteration,
		 * we now re-get it.
		 */
		r = ensure_bitmap(ll, ic);
		if (r)
			return r;

		old = sm_lookup_bitmap(ic->bitmap, bit);
		switch (old) {
		case 0:
			/* inc bitmap, adjust nr_allocated */
			sm_set_bitmap(ic->bitmap, bit, 1);
			(*nr_allocations)++;
			ll->nr_allocated++;
			le32_add_cpu(&ic->ie_disk.nr_free, -1);
			if (le32_to_cpu(ic->ie_disk.none_free_before) == bit)
				ic->ie_disk.none_free_before = cpu_to_le32(bit + 1);
			break;

		case 1:
			/* inc bitmap */
			sm_set_bitmap(ic->bitmap, bit, 2);
			break;

		case 2:
			/* inc bitmap and insert into overflow */
			sm_set_bitmap(ic->bitmap, bit, 3);
			reset_inc_context(ll, ic);

			le_rc = cpu_to_le32(3);
			__dm_bless_for_disk(&le_rc);
			r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
					    &b, &le_rc, &ll->ref_count_root);
			if (r < 0) {
				DMERR("ref count insert failed");
				return r;
			}
			break;

		default:
			/*
			 * inc within the overflow tree only.
			 */
			r = sm_ll_inc_overflow(ll, b, ic);
			if (r < 0)
				return r;
		}
	}

	*new_b = b;
	return 0;
}
/*
 * Finds a bitmap that contains entries in the block range, and increments
 * them.
 */
static int __sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	struct inc_context ic;
	uint32_t bit, bit_end;
	dm_block_t index = b;

	init_inc_context(&ic);

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_inc_bitmap(ll, b, bit, bit_end, nr_allocations, new_b, &ic);

	exit_inc_context(ll, &ic);

	if (r)
		return r;

	return ll->save_ie(ll, index, &ic.ie_disk);
}
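/*
 * Increments blocks in the range [b, e).  Each call to __sm_ll_inc() above
 * handles the part of the range covered by a single bitmap block and
 * reports the first block it did not process back through *new_b.
 */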
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
	      int32_t *nr_allocations)
{
	*nr_allocations = 0;
	while (b != e) {
		int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);

		if (r)
			return r;
	}

	return 0;
}
/*----------------------------------------------------------------*/
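/*
 * The decrement path mirrors the increment path above.  When a count held
 * in the overflow btree drops from 3 to 2, the btree entry is removed and
 * the bitmap entry is set back to 2.
 */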
static int __sm_ll_del_overflow(struct ll_disk *ll, dm_block_t b,
				struct inc_context *ic)
{
	reset_inc_context(ll, ic);
	return dm_btree_remove(&ll->ref_count_info, ll->ref_count_root,
			       &b, &ll->ref_count_root);
}
static int __sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
				struct inc_context *ic, uint32_t *old_rc)
{
	int r;
	int index;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	reset_inc_context(ll, ic);
	r = btree_get_overwrite_leaf(&ll->ref_count_info, ll->ref_count_root,
				     b, &index, &ll->ref_count_root, &ic->overflow_leaf);
	if (r < 0)
		return r;

	n = dm_block_data(ic->overflow_leaf);

	if (!contains_key(n, b, index)) {
		DMERR("overflow btree is missing an entry");
		return -EINVAL;
	}

	v_ptr = value_ptr(n, index);
	rc = le32_to_cpu(*v_ptr);
	*old_rc = rc;

	if (rc == 3)
		return __sm_ll_del_overflow(ll, b, ic);

	rc--;
	*v_ptr = cpu_to_le32(rc);
	return 0;
}
static int sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
			      struct inc_context *ic, uint32_t *old_rc)
{
	/*
	 * Do we already have the correct overflow leaf?
	 */
	if (ic->overflow_leaf) {
		int index;
		struct btree_node *n;
		__le32 *v_ptr;
		uint32_t rc;

		n = dm_block_data(ic->overflow_leaf);
		index = lower_bound(n, b);
		if (contains_key(n, b, index)) {
			v_ptr = value_ptr(n, index);
			rc = le32_to_cpu(*v_ptr);
			*old_rc = rc;

			if (rc > 3) {
				rc--;
				*v_ptr = cpu_to_le32(rc);
				return 0;
			} else {
				return __sm_ll_del_overflow(ll, b, ic);
			}
		}
	}

	return __sm_ll_dec_overflow(ll, b, ic, old_rc);
}
/*
 * Loops round decrementing entries in a single bitmap.
 */
static inline int sm_ll_dec_bitmap(struct ll_disk *ll, dm_block_t b,
				   uint32_t bit, uint32_t bit_end,
				   struct inc_context *ic,
				   int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t old;

	for (; bit != bit_end; bit++, b++) {
		/*
		 * We only need to drop the bitmap if we need to find a new btree
		 * leaf for the overflow.  So if it was dropped last iteration,
		 * we now re-get it.
		 */
		r = ensure_bitmap(ll, ic);
		if (r)
			return r;

		old = sm_lookup_bitmap(ic->bitmap, bit);
		switch (old) {
		case 0:
			DMERR("unable to decrement block");
			return -EINVAL;

		case 1:
			/* dec bitmap */
			sm_set_bitmap(ic->bitmap, bit, 0);
			(*nr_allocations)--;
			ll->nr_allocated--;
			le32_add_cpu(&ic->ie_disk.nr_free, 1);
			ic->ie_disk.none_free_before =
				cpu_to_le32(min(le32_to_cpu(ic->ie_disk.none_free_before), bit));
			break;

		case 2:
			/* dec bitmap */
			sm_set_bitmap(ic->bitmap, bit, 1);
			break;

		case 3:
			r = sm_ll_dec_overflow(ll, b, ic, &old);
			if (r < 0)
				return r;

			if (old == 3) {
				r = ensure_bitmap(ll, ic);
				if (r < 0)
					return r;

				sm_set_bitmap(ic->bitmap, bit, 2);
			}
			break;
		}
	}

	*new_b = b;
	return 0;
}
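/*
 * Finds a bitmap that contains entries in the block range, and decrements
 * them.
 */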
static int __sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t bit, bit_end;
	struct inc_context ic;
	dm_block_t index = b;

	init_inc_context(&ic);

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_dec_bitmap(ll, b, bit, bit_end, &ic, nr_allocations, new_b);
	exit_inc_context(ll, &ic);

	if (r)
		return r;

	return ll->save_ie(ll, index, &ic.ie_disk);
}
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
	      int32_t *nr_allocations)
{
	*nr_allocations = 0;
	while (b != e) {
		int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);

		if (r)
			return r;
	}

	return 0;
}
/*----------------------------------------------------------------*/
int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}
/*----------------------------------------------------------------*/
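/*
 * Metadata space map: the index entries live in a flat in-memory array
 * (ll->mi_le) that is written out as a single metadata block on commit.
 */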
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}
static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}
static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}
static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}
int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}
int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so need to
	 * copy into a new structure.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}
/*----------------------------------------------------------------*/
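/*
 * Disk space map: index entries are stored in a btree (bitmap_info /
 * bitmap_root) behind a small hash-indexed write-back cache
 * (ll->ie_cache), which disk_ll_commit() flushes.
 */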
static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
{
	iec->dirty = false;
	__dm_bless_for_disk(iec->ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &iec->index, &iec->ie, &ll->bitmap_root);
}
static inline unsigned int hash_index(dm_block_t index)
{
	return dm_hash_block(index, IE_CACHE_MASK);
}
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	int r;
	unsigned int h = hash_index(index);
	struct ie_cache *iec = ll->ie_cache + h;

	if (iec->valid) {
		if (iec->index == index) {
			memcpy(ie, &iec->ie, sizeof(*ie));
			return 0;
		}

		if (iec->dirty) {
			r = ie_cache_writeback(ll, iec);
			if (r)
				return r;
		}
	}

	r = dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
	if (!r) {
		iec->valid = true;
		iec->dirty = false;
		iec->index = index;
		memcpy(&iec->ie, ie, sizeof(*ie));
	}

	return r;
}
static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	int r;
	unsigned int h = hash_index(index);
	struct ie_cache *iec = ll->ie_cache + h;

	ll->bitmap_index_changed = true;
	if (iec->valid) {
		if (iec->index == index) {
			memcpy(&iec->ie, ie, sizeof(*ie));
			iec->dirty = true;
			return 0;
		}

		if (iec->dirty) {
			r = ie_cache_writeback(ll, iec);
			if (r)
				return r;
		}
	}

	iec->valid = true;
	iec->dirty = true;
	iec->index = index;
	memcpy(&iec->ie, ie, sizeof(*ie));
	return 0;
}
static int disk_ll_init_index(struct ll_disk *ll)
{
	unsigned int i;

	for (i = 0; i < IE_CACHE_SIZE; i++) {
		struct ie_cache *iec = ll->ie_cache + i;

		iec->valid = false;
		iec->dirty = false;
	}
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}
static int disk_ll_open(struct ll_disk *ll)
{
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}
static int disk_ll_commit(struct ll_disk *ll)
{
	int r = 0;
	unsigned int i;

	for (i = 0; i < IE_CACHE_SIZE; i++) {
		struct ie_cache *iec = ll->ie_cache + i;

		if (iec->valid && iec->dirty)
			r = ie_cache_writeback(ll, iec);
	}

	return r;
}
int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}
int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}
/*----------------------------------------------------------------*/