/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"
/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}
static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
			 unsigned index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}

/*----------------------------------------------------------------*/
/* makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}
int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 1);
}
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	unsigned i;
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		for (i = 0; i < nr_entries; i++)
			dm_tm_inc(tm, value64(n, i));
	else if (vt->inc)
		for (i = 0; i < nr_entries; i++)
			vt->inc(vt->context, value_ptr(n, i));
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(node->header.max_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= max_entries ||
	    nr_entries >= max_entries) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}

/*----------------------------------------------------------------*/
/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3;		/* rounds down */

	return 3 * n;
}
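
/*
 * Editor's worked example (hedged, assuming the usual 32-byte struct
 * node_header and a 4096-byte block): with 8-byte values, elt_size is
 * 16, so total = (4096 - 32) / 16 = 254, n = 254 / 3 = 84, and the
 * function returns 3 * 84 = 252 entries per node.
 */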
int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	unlock_block(info, b);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
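
/*
 * Editor's sketch (not part of the original file): typical client setup
 * before creating an empty tree.  Assumes an existing transaction
 * manager; the 'example_' names are hypothetical.
 */
static int __maybe_unused example_create_tree(struct dm_transaction_manager *tm,
					      dm_block_t *root)
{
	struct dm_btree_info info;

	info.tm = tm;
	info.levels = 1;			/* one level of 64-bit keys */
	info.value_type.context = NULL;
	info.value_type.size = sizeof(__le64);	/* values are stored little endian */
	info.value_type.inc = NULL;		/* plain values: no refcount hooks */
	info.value_type.dec = NULL;
	info.value_type.equal = NULL;

	return dm_btree_empty(&info, root);
}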
/*----------------------------------------------------------------*/

/*
 * Deletion is naturally recursive, but since we have limited stack space
 * we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned level;
	unsigned nr_children;
	unsigned current_child;
};

struct del_stack {
	struct dm_btree_info *info;
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};
static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}
static void prefetch_children(struct del_stack *s, struct frame *f)
{
	unsigned i;
	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

	for (i = 0; i < f->nr_children; i++)
		dm_bm_prefetch(bm, value64(f->n, i));
}

static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}

static void unlock_all_frames(struct del_stack *s)
{
	struct frame *f;

	while (unprocessed_frames(s)) {
		f = s->spine + s->top--;
		dm_tm_unlock(s->tm, f->b);
	}
}
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	/*
	 * dm_btree_del() is called via an ioctl, and as such should be
	 * considered an FS op.  We can't recurse back into the FS, so we
	 * allocate GFP_NOFS.
	 */
	s = kmalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return -ENOMEM;
	s->info = info;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec) {
				unsigned i;

				for (i = 0; i < f->nr_children; i++)
					info->value_type.dec(info->value_type.context,
							     value_ptr(f->n, i));
			}
			pop_frame(s);
		}
	}
out:
	if (r) {
		/* cleanup all frames of del_stack */
		unlock_all_frames(s);
	}
	kfree(s);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
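
/*
 * Editor's sketch (not part of the original file): dm_btree_del() tears
 * down a whole tree, so it pairs with dm_btree_empty() as destroy/create.
 * Assumes @info was set up as for dm_btree_empty(); names are hypothetical.
 */
static int __maybe_unused example_reset_tree(struct dm_btree_info *info,
					     dm_block_t *root)
{
	int r = dm_btree_del(info, *root);	/* release every block ... */

	if (r)
		return r;

	return dm_btree_empty(info, root);	/* ... then start afresh */
}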
/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	if (v)
		memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
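
/*
 * Editor's sketch (not part of the original file): a single-key lookup in
 * a one-level tree with 8-byte values.  Values come back little endian,
 * so the caller converts; names are hypothetical.
 */
static int __maybe_unused example_lookup(struct dm_btree_info *info,
					 dm_block_t root, uint64_t key,
					 uint64_t *result)
{
	__le64 value_le;
	int r = dm_btree_lookup(info, root, &key, &value_le);

	if (!r)
		*result = le64_to_cpu(value_le);

	return r;	/* -ENODATA if the key is absent */
}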
static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
				       uint64_t key, uint64_t *rkey, void *value_le)
{
	int r, i;
	uint32_t flags, nr_entries;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, root, &node);
	if (r)
		return r;

	n = dm_block_data(node);
	flags = le32_to_cpu(n->header.flags);
	nr_entries = le32_to_cpu(n->header.nr_entries);

	if (flags & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0) {
			/*
			 * avoid early -ENODATA return when all entries are
			 * higher than the search @key.
			 */
			i = 0;
		}
		if (i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		if (r == -ENODATA && i < (nr_entries - 1)) {
			i++;
			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		}

	} else {
		i = upper_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		*rkey = le64_to_cpu(n->keys[i]);
		memcpy(value_le, value_ptr(n, i), info->value_type.size);
	}
out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *keys, uint64_t *rkey, void *value_le)
{
	unsigned level;
	int r = -ENODATA;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels - 1u; level++) {
		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, rkey,
				     &internal_value_le, sizeof(uint64_t));
		if (r)
			goto out;

		if (*rkey != keys[level]) {
			r = -EINVAL;
			goto out;
		}

		root = le64_to_cpu(internal_value_le);
	}

	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
	exit_ro_spine(&spine);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
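
/*
 * Editor's sketch (not part of the original file): dm_btree_lookup_next()
 * returns the entry with the smallest key >= the one given, which makes
 * it handy for skipping holes in a sparse key space.  Hypothetical names;
 * assumes a single-level tree with 8-byte values.
 */
static int __maybe_unused example_next_entry(struct dm_btree_info *info,
					     dm_block_t root, uint64_t key,
					     uint64_t *next_key, uint64_t *next_value)
{
	__le64 value_le;
	int r = dm_btree_lookup_next(info, root, &key, next_key, &value_le);

	if (!r)
		*next_value = le64_to_cpu(value_le);

	return r;
}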
/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and that it has room
 * for another child.
 *
 * Before:
 *	Parent -> A (full)
 *
 * After:
 *	Parent -> A*, B
 *
 * Where A* is a shadow of A holding the lower half of A's entries, and B
 * is a new sibling holding the upper half.
 */
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
			       uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		unlock_block(s->info, right);
		return r;
	}

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *	A (full)
 *
 * After:
 *	A (shadow, now internal) -> B, C
 *
 * Where B holds the lower half of A's entries and C the upper half.
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	pn = dm_block_data(new_parent);
	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;

	/* create & init the left block */
	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;
	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);

	/* create & init the right block */
	r = new_block(s->info, &right);
	if (r < 0) {
		unlock_block(s->info, left);
		return r;
	}

	rn = dm_block_data(right);
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	unlock_block(s->info, left);
	unlock_block(s->info, right);
	return 0;
}
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary. */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		*index = i + 1;
	else
		*index = i;

	return 0;
}
static bool need_insert(struct btree_node *node, uint64_t *keys,
			unsigned level, unsigned index)
{
	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
		(le64_to_cpu(node->keys[index]) != keys[level]));
}
static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
		  __dm_written_to_disk(value)
{
	int r;
	unsigned level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	init_le64_type(info->tm, &le64_type);
	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));

		if (need_insert(n, keys, level, index)) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));

	if (need_insert(n, keys, level, index)) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
		    __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);

int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
			   __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
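
/*
 * Editor's sketch (not part of the original file): inserting one value.
 * The tree is copy on write, so the caller must adopt the new root.  The
 * value is blessed before the call, matching the __dm_written_to_disk
 * annotation.  Hypothetical names; single-level tree, 8-byte values.
 */
static int __maybe_unused example_insert(struct dm_btree_info *info,
					 dm_block_t *root, uint64_t key,
					 uint64_t value)
{
	__le64 value_le = cpu_to_le64(value);
	dm_block_t new_root;
	int r, inserted;

	__dm_bless_for_disk(&value_le);
	r = dm_btree_insert_notify(info, *root, &key, &value_le,
				   &new_root, &inserted);
	if (!r)
		*root = new_root;	/* adopt the copy-on-write root */

	return r;
}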
/*----------------------------------------------------------------*/

static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
		    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;
		else
			i--;

		if (find_highest)
			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		else
			*result_key = le64_to_cpu(ro_node(s)->keys[0]);

		if (next_block || flags & INTERNAL_NODE) {
			if (find_highest)
				block = value64(ro_node(s), i);
			else
				block = value64(ro_node(s), 0);
		}

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}
static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
			     bool find_highest, uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_key(&spine, root, find_highest, result_keys + level,
			     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
			     uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
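
/*
 * Editor's sketch (not part of the original file): finding the occupied
 * key range of a single-level tree.  Both helpers return the number of
 * key levels filled in (here 1) or a negative errno.  Hypothetical names.
 */
static int __maybe_unused example_key_range(struct dm_btree_info *info,
					    dm_block_t root,
					    uint64_t *lowest, uint64_t *highest)
{
	int r = dm_btree_find_lowest_key(info, root, lowest);

	if (r < 0)
		return r;

	r = dm_btree_find_highest_key(info, root, highest);
	return r < 0 ? r : 0;
}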
/*----------------------------------------------------------------*/
/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also this only works for single-level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	struct dm_block *node;
	struct btree_node *n;
	uint64_t keys;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	BUG_ON(info->levels > 1);
	return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
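
/*
 * Editor's sketch (not part of the original file): a dm_btree_walk()
 * callback that counts leaf entries.  A non-zero return from the callback
 * aborts the walk.  Hypothetical names; single-level tree assumed.
 */
static int __maybe_unused example_count_fn(void *context, uint64_t *keys,
					   void *leaf)
{
	unsigned *count = context;

	(*count)++;	/* keys[0] is the entry's key, leaf its value */
	return 0;
}

static int __maybe_unused example_count_entries(struct dm_btree_info *info,
						dm_block_t root, unsigned *count)
{
	*count = 0;
	return dm_btree_walk(info, root, example_count_fn, count);
}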
/*----------------------------------------------------------------*/

static void prefetch_values(struct dm_btree_cursor *c)
{
	unsigned i, nr;
	__le64 value_le;
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);
	struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);

	BUG_ON(c->info->value_type.size != sizeof(value_le));

	nr = le32_to_cpu(bn->header.nr_entries);
	for (i = 0; i < nr; i++) {
		memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
		dm_bm_prefetch(bm, le64_to_cpu(value_le));
	}
}
static bool leaf_node(struct dm_btree_cursor *c)
{
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);

	return le32_to_cpu(bn->header.flags) & LEAF_NODE;
}
static int push_node(struct dm_btree_cursor *c, dm_block_t b)
{
	int r;
	struct cursor_node *n = c->nodes + c->depth;

	if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
		DMERR("couldn't push cursor node, stack depth too high");
		return -EINVAL;
	}

	r = bn_read_lock(c->info, b, &n->b);
	if (r)
		return r;

	n->index = 0;
	c->depth++;

	if (c->prefetch_leaves || !leaf_node(c))
		prefetch_values(c);

	return 0;
}
static void pop_node(struct dm_btree_cursor *c)
{
	c->depth--;
	unlock_block(c->info, c->nodes[c->depth].b);
}
static int inc_or_backtrack(struct dm_btree_cursor *c)
{
	struct cursor_node *n;
	struct btree_node *bn;

	for (;;) {
		if (!c->depth)
			return -ENODATA;

		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		n->index++;
		if (n->index < le32_to_cpu(bn->header.nr_entries))
			break;

		pop_node(c);
	}

	return 0;
}
static int find_leaf(struct dm_btree_cursor *c)
{
	int r = 0;
	struct cursor_node *n;
	struct btree_node *bn;
	__le64 value_le;

	for (;;) {
		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
			break;

		memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
		r = push_node(c, le64_to_cpu(value_le));
		if (r) {
			DMERR("push_node failed");
			break;
		}
	}

	if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
		return -ENODATA;

	return r;
}
int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
			  bool prefetch_leaves, struct dm_btree_cursor *c)
{
	int r;

	c->info = info;
	c->root = root;
	c->depth = 0;
	c->prefetch_leaves = prefetch_leaves;

	r = push_node(c, root);
	if (r)
		return r;

	return find_leaf(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);
void dm_btree_cursor_end(struct dm_btree_cursor *c)
{
	while (c->depth)
		pop_node(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
	int r = inc_or_backtrack(c);

	if (!r) {
		r = find_leaf(c);
		if (r)
			DMERR("find_leaf failed");
	}

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
{
	int r = 0;

	while (count-- && !r)
		r = dm_btree_cursor_next(c);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);
int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
{
	if (c->depth) {
		struct cursor_node *n = c->nodes + c->depth - 1;
		struct btree_node *bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
			return -EINVAL;

		*key = le64_to_cpu(*key_ptr(bn, n->index));
		memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
		return 0;

	} else
		return -ENODATA;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
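
/*
 * Editor's sketch (not part of the original file): visiting every entry
 * in key order with the cursor API.  dm_btree_cursor_next() returns
 * -ENODATA once the tree is exhausted.  Hypothetical names; 8-byte
 * values assumed.
 */
static int __maybe_unused example_iterate(struct dm_btree_info *info,
					  dm_block_t root)
{
	struct dm_btree_cursor c;
	uint64_t key;
	__le64 value_le;
	int r;

	r = dm_btree_cursor_begin(info, root, false, &c);
	if (r)
		return r == -ENODATA ? 0 : r;	/* empty tree is not an error here */

	do {
		r = dm_btree_cursor_get_value(&c, &key, &value_le);
		if (r)
			break;

		/* use key and le64_to_cpu(value_le) here */
	} while (!(r = dm_btree_cursor_next(&c)));

	dm_btree_cursor_end(&c);		/* drop all read locks */
	return r == -ENODATA ? 0 : r;
}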