/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"
/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}
static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
			 unsigned index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}
/*----------------------------------------------------------------*/
/* Makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}
int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 1);
}
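
/*
 * Worked example of the two bounds (illustrative keys, not on-disk data):
 * for a node holding keys { 10, 20, 30 }, both bounds return index 1 for
 * the exact key 20.  For the absent key 25, lower_bound() returns 1 (the
 * last entry <= 25) and upper_bound() returns 2 (the first entry > 25).
 * For a key below all entries, lower_bound() returns -1.
 */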
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	unsigned i;
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		for (i = 0; i < nr_entries; i++)
			dm_tm_inc(tm, value64(n, i));
	else if (vt->inc)
		for (i = 0; i < nr_entries; i++)
			vt->inc(vt->context, value_ptr(n, i));
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= le32_to_cpu(node->header.max_entries)) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}
/*----------------------------------------------------------------*/
/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3;		/* rounds down */

	return 3 * n;
}
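
/*
 * Worked example (assuming a 4096 byte block and a 32 byte node_header):
 * with 8 byte values, elt_size = 8 + 8 = 16, so
 * total = (4096 - 32) / 16 = 254, and max_entries = 3 * (254 / 3) = 252.
 */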
int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	unlock_block(info, b);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
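
/*
 * Usage sketch (illustrative only; the function and variable names below
 * are not part of this API): setting up a dm_btree_info for a single
 * level tree of plain 8 byte values and creating an empty btree.
 */
#if 0
static int example_new_tree(struct dm_transaction_manager *tm,
			    dm_block_t *root)
{
	struct dm_btree_info info;

	memset(&info, 0, sizeof(info));
	info.tm = tm;
	info.levels = 1;
	info.value_type.size = sizeof(__le64);
	/* inc/dec/equal left NULL: values carry no reference counts */

	return dm_btree_empty(&info, root);
}
#endif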
/*----------------------------------------------------------------*/
/*
 * Deletion uses a recursive algorithm; since we have limited stack
 * space we explicitly manage our own stack, allocated on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned level;
	unsigned nr_children;
	unsigned current_child;
};

struct del_stack {
	struct dm_btree_info *info;
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};
static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}
static void prefetch_children(struct del_stack *s, struct frame *f)
{
	unsigned i;
	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

	for (i = 0; i < f->nr_children; i++)
		dm_bm_prefetch(bm, value64(f->n, i));
}

static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}

static void unlock_all_frames(struct del_stack *s)
{
	struct frame *f;

	while (unprocessed_frames(s)) {
		f = s->spine + s->top--;
		dm_tm_unlock(s->tm, f->b);
	}
}
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	/*
	 * dm_btree_del() is called via an ioctl, and as such should be
	 * considered an FS op.  We can't recurse back into the FS, so we
	 * allocate GFP_NOFS.
	 */
	s = kmalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return -ENOMEM;
	s->info = info;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec) {
				unsigned i;

				for (i = 0; i < f->nr_children; i++)
					info->value_type.dec(info->value_type.context,
							     value_ptr(f->n, i));
			}
			pop_frame(s);
		}
	}
out:
	if (r) {
		/* cleanup all frames of del_stack */
		unlock_all_frames(s);
	}
	kfree(s);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
/*----------------------------------------------------------------*/
static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
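
/*
 * Usage sketch (illustrative only; names below are not part of this API):
 * looking up an 8 byte value in a single level tree.
 */
#if 0
static int example_lookup(struct dm_btree_info *info, dm_block_t root,
			  uint64_t block_nr, uint64_t *result)
{
	uint64_t key = block_nr;
	__le64 value_le;
	int r;

	r = dm_btree_lookup(info, root, &key, &value_le);
	if (!r)
		*result = le64_to_cpu(value_le);

	return r;	/* -ENODATA if the key is unmapped */
}
#endif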
static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
				       uint64_t key, uint64_t *rkey, void *value_le)
{
	int r, i;
	uint32_t flags, nr_entries;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, root, &node);
	if (r)
		return r;

	n = dm_block_data(node);
	flags = le32_to_cpu(n->header.flags);
	nr_entries = le32_to_cpu(n->header.nr_entries);

	if (flags & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0) {
			/*
			 * avoid early -ENODATA return when all entries are
			 * higher than the search @key.
			 */
			i = 0;
		}
		if (i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		if (r == -ENODATA && i < (nr_entries - 1)) {
			i++;
			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		}

	} else {
		i = upper_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		*rkey = le64_to_cpu(n->keys[i]);
		memcpy(value_le, value_ptr(n, i), info->value_type.size);
	}
out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *keys, uint64_t *rkey, void *value_le)
{
	unsigned level;
	int r = -ENODATA;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels - 1u; level++) {
		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, rkey,
				     &internal_value_le, sizeof(uint64_t));
		if (r)
			goto out;

		if (*rkey != keys[level]) {
			r = -ENODATA;
			goto out;
		}

		root = le64_to_cpu(internal_value_le);
	}

	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
	exit_ro_spine(&spine);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
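
/*
 * Worked example of the semantics (illustrative keys): with leaf keys
 * { 10, 20, 30 }, dm_btree_lookup_next() for key 15 returns rkey = 20,
 * for key 20 it also returns rkey = 20, and for key 31 it returns
 * -ENODATA.  i.e. it finds the first mapping whose key is >= the one
 * asked for.
 */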
/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and that it has room
 * for another child.
 *
 * After the split the parent gains a new child B, which takes the upper
 * half of A's entries; A* (a shadow of A) keeps the lower half.
 */
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
			       uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		unlock_block(s->info, right);
		return r;
	}

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before the split the node is full; afterwards it becomes an internal
 * node with exactly two children, B and C, each holding half of the
 * original entries.
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	r = new_block(s->info, &right);
	if (r < 0) {
		unlock_block(s->info, left);
		return r;
	}

	pn = dm_block_data(new_parent);
	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;

	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));

	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	/*
	 * Rejig the spine.  This is ugly, since it knows too
	 * much about the spine.
	 */
	if (s->nodes[0] != new_parent) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = new_parent;
	}
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}
	s->count = 2;

	return 0;
}
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary? */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}
static bool need_insert(struct btree_node *node, uint64_t *keys,
			unsigned level, unsigned index)
{
	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
		(le64_to_cpu(node->keys[index]) != keys[level]));
}
static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
	__dm_written_to_disk(value)
{
	int r;
	unsigned level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	init_le64_type(info->tm, &le64_type);
	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));

		if (need_insert(n, keys, level, index)) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));

	if (need_insert(n, keys, level, index)) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);
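
/*
 * Usage sketch (illustrative only; names below are not part of this API):
 * mapping a key to an 8 byte value in a single level tree.  Inserts are
 * copy-on-write, so the caller receives a *new* root.
 */
#if 0
static int example_insert(struct dm_btree_info *info, dm_block_t *root,
			  uint64_t key, uint64_t value)
{
	__le64 value_le = cpu_to_le64(value);

	__dm_bless_for_disk(&value_le);
	return dm_btree_insert(info, *root, &key, &value_le, root);
}
#endif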
int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
/*----------------------------------------------------------------*/
static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
		    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;
		else
			i--;

		if (find_highest)
			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		else
			*result_key = le64_to_cpu(ro_node(s)->keys[0]);

		if (next_block || flags & INTERNAL_NODE) {
			if (find_highest)
				block = value64(ro_node(s), i);
			else
				block = value64(ro_node(s), 0);
		}

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}
static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
			     bool find_highest, uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_key(&spine, root, find_highest, result_keys + level,
			     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
			     uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
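
/*
 * Note on the return value of the two helpers above: on success they
 * return the number of levels for which a key was filled in (info->levels
 * for a fully populated tree), so 0 indicates an empty tree; a negative
 * errno signals failure.
 */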
/*----------------------------------------------------------------*/
/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also this only works for single level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	uint64_t keys;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	BUG_ON(info->levels > 1);
	return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
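
/*
 * Usage sketch (illustrative only; names below are not part of this API):
 * counting the mapped entries in a single level tree with dm_btree_walk().
 */
#if 0
static int example_count_fn(void *context, uint64_t *keys, void *leaf)
{
	(*(uint64_t *) context)++;
	return 0;		/* returning non-zero would abort the walk */
}

static int example_count(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *count)
{
	*count = 0;
	return dm_btree_walk(info, root, example_count_fn, count);
}
#endif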
/*----------------------------------------------------------------*/
static void prefetch_values(struct dm_btree_cursor *c)
{
	unsigned i, nr;
	__le64 value_le;
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);
	struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);

	BUG_ON(c->info->value_type.size != sizeof(value_le));

	nr = le32_to_cpu(bn->header.nr_entries);
	for (i = 0; i < nr; i++) {
		memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
		dm_bm_prefetch(bm, le64_to_cpu(value_le));
	}
}
static bool leaf_node(struct dm_btree_cursor *c)
{
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);

	return le32_to_cpu(bn->header.flags) & LEAF_NODE;
}
static int push_node(struct dm_btree_cursor *c, dm_block_t b)
{
	int r;
	struct cursor_node *n = c->nodes + c->depth;

	if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
		DMERR("couldn't push cursor node, stack depth too high");
		return -EINVAL;
	}

	r = bn_read_lock(c->info, b, &n->b);
	if (r)
		return r;

	n->index = 0;
	c->depth++;

	if (c->prefetch_leaves || !leaf_node(c))
		prefetch_values(c);

	return 0;
}
static void pop_node(struct dm_btree_cursor *c)
{
	c->depth--;
	unlock_block(c->info, c->nodes[c->depth].b);
}
static int inc_or_backtrack(struct dm_btree_cursor *c)
{
	struct cursor_node *n;
	struct btree_node *bn;

	for (;;) {
		if (!c->depth)
			return -ENODATA;

		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		n->index++;
		if (n->index < le32_to_cpu(bn->header.nr_entries))
			break;

		pop_node(c);
	}

	return 0;
}
static int find_leaf(struct dm_btree_cursor *c)
{
	int r = 0;
	__le64 value_le;
	struct cursor_node *n;
	struct btree_node *bn;

	for (;;) {
		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
			break;

		memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
		r = push_node(c, le64_to_cpu(value_le));
		if (r) {
			DMERR("push_node failed");
			break;
		}
	}

	if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
		return -ENODATA;

	return r;
}
int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
			  bool prefetch_leaves, struct dm_btree_cursor *c)
{
	int r;

	c->info = info;
	c->root = root;
	c->depth = 0;
	c->prefetch_leaves = prefetch_leaves;

	r = push_node(c, root);
	if (r)
		return r;

	return find_leaf(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);
void dm_btree_cursor_end(struct dm_btree_cursor *c)
{
	while (c->depth)
		pop_node(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
	int r = inc_or_backtrack(c);
	if (!r) {
		r = find_leaf(c);
		if (r)
			DMERR("find_leaf failed");
	}

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
{
	int r = 0;

	while (count-- && !r)
		r = dm_btree_cursor_next(c);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);
int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
{
	if (c->depth) {
		struct cursor_node *n = c->nodes + c->depth - 1;
		struct btree_node *bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
			return -EINVAL;

		*key = le64_to_cpu(*key_ptr(bn, n->index));
		memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
		return 0;

	} else
		return -ENODATA;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
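
/*
 * Usage sketch (illustrative only; names below are not part of this API):
 * iterating over every mapping in a single level tree of 8 byte values
 * with the cursor API.
 */
#if 0
static int example_iterate(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	uint64_t key;
	__le64 value_le;
	struct dm_btree_cursor c;

	r = dm_btree_cursor_begin(info, root, false, &c);
	if (r)
		return r == -ENODATA ? 0 : r;	/* an empty tree is fine */

	for (;;) {
		r = dm_btree_cursor_get_value(&c, &key, &value_le);
		if (r)
			break;

		/* consume key / le64_to_cpu(value_le) here */

		r = dm_btree_cursor_next(&c);
		if (r)
			break;	/* -ENODATA just means end of tree */
	}

	dm_btree_cursor_end(&c);
	return r == -ENODATA ? 0 : r;
}
#endif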