/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/pagevec.h>
30 static void __nilfs_btree_init(struct nilfs_bmap *bmap);
32 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
34 struct nilfs_btree_path *path;
35 int level = NILFS_BTREE_LEVEL_DATA;
37 path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
41 for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
42 path[level].bp_bh = NULL;
43 path[level].bp_sib_bh = NULL;
44 path[level].bp_index = 0;
45 path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
46 path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
47 path[level].bp_op = NULL;
54 static void nilfs_btree_free_path(struct nilfs_btree_path *path)
56 int level = NILFS_BTREE_LEVEL_DATA;
58 for (; level < NILFS_BTREE_LEVEL_MAX; level++)
59 brelse(path[level].bp_bh);
61 kmem_cache_free(nilfs_btree_path_cache, path);
65 * B-tree node operations
67 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
68 __u64 ptr, struct buffer_head **bhp)
70 struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
71 struct address_space *btnc = btnc_inode->i_mapping;
72 struct buffer_head *bh;
74 bh = nilfs_btnode_create_block(btnc, ptr);
78 set_buffer_nilfs_volatile(bh);
83 static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
85 return node->bn_flags;
89 nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
91 node->bn_flags = flags;
94 static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
96 return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
99 static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
101 return node->bn_level;
105 nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
107 node->bn_level = level;
110 static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
112 return le16_to_cpu(node->bn_nchildren);
116 nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
118 node->bn_nchildren = cpu_to_le16(nchildren);
121 static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
123 return i_blocksize(btree->b_inode);
126 static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
128 return btree->b_nchildren_per_block;
132 nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
134 return (__le64 *)((char *)(node + 1) +
135 (nilfs_btree_node_root(node) ?
136 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
140 nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
142 return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
146 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
148 return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
152 nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
154 *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
158 nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
161 return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
165 nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
168 *(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
171 static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
172 int level, int nchildren, int ncmax,
173 const __u64 *keys, const __u64 *ptrs)
179 nilfs_btree_node_set_flags(node, flags);
180 nilfs_btree_node_set_level(node, level);
181 nilfs_btree_node_set_nchildren(node, nchildren);
183 dkeys = nilfs_btree_node_dkeys(node);
184 dptrs = nilfs_btree_node_dptrs(node, ncmax);
185 for (i = 0; i < nchildren; i++) {
186 dkeys[i] = cpu_to_le64(keys[i]);
187 dptrs[i] = cpu_to_le64(ptrs[i]);
191 /* Assume the buffer heads corresponding to left and right are locked. */
192 static void nilfs_btree_node_move_left(struct nilfs_btree_node *left,
193 struct nilfs_btree_node *right,
194 int n, int lncmax, int rncmax)
196 __le64 *ldkeys, *rdkeys;
197 __le64 *ldptrs, *rdptrs;
198 int lnchildren, rnchildren;
200 ldkeys = nilfs_btree_node_dkeys(left);
201 ldptrs = nilfs_btree_node_dptrs(left, lncmax);
202 lnchildren = nilfs_btree_node_get_nchildren(left);
204 rdkeys = nilfs_btree_node_dkeys(right);
205 rdptrs = nilfs_btree_node_dptrs(right, rncmax);
206 rnchildren = nilfs_btree_node_get_nchildren(right);
208 memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
209 memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
210 memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
211 memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));
215 nilfs_btree_node_set_nchildren(left, lnchildren);
216 nilfs_btree_node_set_nchildren(right, rnchildren);
219 /* Assume that the buffer heads corresponding to left and right are locked. */
220 static void nilfs_btree_node_move_right(struct nilfs_btree_node *left,
221 struct nilfs_btree_node *right,
222 int n, int lncmax, int rncmax)
224 __le64 *ldkeys, *rdkeys;
225 __le64 *ldptrs, *rdptrs;
226 int lnchildren, rnchildren;
228 ldkeys = nilfs_btree_node_dkeys(left);
229 ldptrs = nilfs_btree_node_dptrs(left, lncmax);
230 lnchildren = nilfs_btree_node_get_nchildren(left);
232 rdkeys = nilfs_btree_node_dkeys(right);
233 rdptrs = nilfs_btree_node_dptrs(right, rncmax);
234 rnchildren = nilfs_btree_node_get_nchildren(right);
236 memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
237 memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
238 memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
239 memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));
243 nilfs_btree_node_set_nchildren(left, lnchildren);
244 nilfs_btree_node_set_nchildren(right, rnchildren);
247 /* Assume that the buffer head corresponding to node is locked. */
248 static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
249 __u64 key, __u64 ptr, int ncmax)
255 dkeys = nilfs_btree_node_dkeys(node);
256 dptrs = nilfs_btree_node_dptrs(node, ncmax);
257 nchildren = nilfs_btree_node_get_nchildren(node);
258 if (index < nchildren) {
259 memmove(dkeys + index + 1, dkeys + index,
260 (nchildren - index) * sizeof(*dkeys));
261 memmove(dptrs + index + 1, dptrs + index,
262 (nchildren - index) * sizeof(*dptrs));
264 dkeys[index] = cpu_to_le64(key);
265 dptrs[index] = cpu_to_le64(ptr);
267 nilfs_btree_node_set_nchildren(node, nchildren);
270 /* Assume that the buffer head corresponding to node is locked. */
271 static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
272 __u64 *keyp, __u64 *ptrp, int ncmax)
280 dkeys = nilfs_btree_node_dkeys(node);
281 dptrs = nilfs_btree_node_dptrs(node, ncmax);
282 key = le64_to_cpu(dkeys[index]);
283 ptr = le64_to_cpu(dptrs[index]);
284 nchildren = nilfs_btree_node_get_nchildren(node);
290 if (index < nchildren - 1) {
291 memmove(dkeys + index, dkeys + index + 1,
292 (nchildren - index - 1) * sizeof(*dkeys));
293 memmove(dptrs + index, dptrs + index + 1,
294 (nchildren - index - 1) * sizeof(*dptrs));
297 nilfs_btree_node_set_nchildren(node, nchildren);
300 static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
301 __u64 key, int *indexp)
304 int index, low, high, s;
308 high = nilfs_btree_node_get_nchildren(node) - 1;
311 while (low <= high) {
312 index = (low + high) / 2;
313 nkey = nilfs_btree_node_get_key(node, index);
317 } else if (nkey < key) {
327 if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
328 if (s > 0 && index > 0)
340 * nilfs_btree_node_broken - verify consistency of btree node
341 * @node: btree node block to be examined
342 * @size: node size (in bytes)
343 * @inode: host inode of btree
344 * @blocknr: block number
346 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
348 static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
349 size_t size, struct inode *inode,
352 int level, flags, nchildren;
355 level = nilfs_btree_node_get_level(node);
356 flags = nilfs_btree_node_get_flags(node);
357 nchildren = nilfs_btree_node_get_nchildren(node);
359 if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
360 level >= NILFS_BTREE_LEVEL_MAX ||
361 (flags & NILFS_BTREE_NODE_ROOT) ||
363 nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
364 nilfs_msg(inode->i_sb, KERN_CRIT,
365 "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
366 inode->i_ino, (unsigned long long)blocknr, level,
374 * nilfs_btree_root_broken - verify consistency of btree root node
375 * @node: btree root node to be examined
376 * @inode: host inode of btree
378 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
380 static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
383 int level, flags, nchildren;
386 level = nilfs_btree_node_get_level(node);
387 flags = nilfs_btree_node_get_flags(node);
388 nchildren = nilfs_btree_node_get_nchildren(node);
390 if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
391 level >= NILFS_BTREE_LEVEL_MAX ||
393 nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
394 nilfs_msg(inode->i_sb, KERN_CRIT,
395 "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
396 inode->i_ino, level, flags, nchildren);
402 int nilfs_btree_broken_node_block(struct buffer_head *bh)
407 if (buffer_nilfs_checked(bh))
410 inode = bh->b_page->mapping->host;
411 ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
412 bh->b_size, inode, bh->b_blocknr);
414 set_buffer_nilfs_checked(bh);
418 static struct nilfs_btree_node *
419 nilfs_btree_get_root(const struct nilfs_bmap *btree)
421 return (struct nilfs_btree_node *)btree->b_u.u_data;
424 static struct nilfs_btree_node *
425 nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
427 return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
430 static struct nilfs_btree_node *
431 nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
433 return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
436 static int nilfs_btree_height(const struct nilfs_bmap *btree)
438 return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
441 static struct nilfs_btree_node *
442 nilfs_btree_get_node(const struct nilfs_bmap *btree,
443 const struct nilfs_btree_path *path,
444 int level, int *ncmaxp)
446 struct nilfs_btree_node *node;
448 if (level == nilfs_btree_height(btree) - 1) {
449 node = nilfs_btree_get_root(btree);
450 *ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
452 node = nilfs_btree_get_nonroot_node(path, level);
453 *ncmaxp = nilfs_btree_nchildren_per_block(btree);
458 static int nilfs_btree_bad_node(const struct nilfs_bmap *btree,
459 struct nilfs_btree_node *node, int level)
461 if (unlikely(nilfs_btree_node_get_level(node) != level)) {
463 nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
464 "btree level mismatch (ino=%lu): %d != %d",
465 btree->b_inode->i_ino,
466 nilfs_btree_node_get_level(node), level);
/* Context used to read ahead sibling btree node blocks during lookup. */
struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};
479 static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
480 struct buffer_head **bhp,
481 const struct nilfs_btree_readahead_info *ra)
483 struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
484 struct address_space *btnc = btnc_inode->i_mapping;
485 struct buffer_head *bh, *ra_bh;
486 sector_t submit_ptr = 0;
489 ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh,
492 if (likely(ret == -EEXIST))
494 if (ret == -ENOENT) {
496 * Block address translation failed due to invalid
497 * value of 'ptr'. In this case, return internal code
498 * -EINVAL (broken bmap) to notify bmap layer of fatal
499 * metadata corruption.
510 /* read ahead sibling nodes */
511 for (n = ra->max_ra_blocks, i = ra->index + 1;
512 n > 0 && i < ra->ncmax; n--, i++) {
513 ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
515 ret = nilfs_btnode_submit_block(btnc, ptr2, 0,
516 REQ_OP_READ, REQ_RAHEAD,
517 &ra_bh, &submit_ptr);
518 if (likely(!ret || ret == -EEXIST))
520 else if (ret != -EBUSY)
522 if (!buffer_locked(bh))
530 if (!buffer_uptodate(bh)) {
531 nilfs_msg(btree->b_inode->i_sb, KERN_ERR,
532 "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)",
533 btree->b_inode->i_ino, (unsigned long long)ptr);
539 if (nilfs_btree_broken_node_block(bh)) {
540 clear_buffer_uptodate(bh);
549 static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
550 struct buffer_head **bhp)
552 return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
555 static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
556 struct nilfs_btree_path *path,
557 __u64 key, __u64 *ptrp, int minlevel,
560 struct nilfs_btree_node *node;
561 struct nilfs_btree_readahead_info p, *ra;
563 int level, index, found, ncmax, ret;
565 node = nilfs_btree_get_root(btree);
566 level = nilfs_btree_node_get_level(node);
567 if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
570 found = nilfs_btree_node_lookup(node, key, &index);
571 ptr = nilfs_btree_node_get_ptr(node, index,
572 NILFS_BTREE_ROOT_NCHILDREN_MAX);
573 path[level].bp_bh = NULL;
574 path[level].bp_index = index;
576 ncmax = nilfs_btree_nchildren_per_block(btree);
578 while (--level >= minlevel) {
580 if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
581 p.node = nilfs_btree_get_node(btree, path, level + 1,
587 ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
592 node = nilfs_btree_get_nonroot_node(path, level);
593 if (nilfs_btree_bad_node(btree, node, level))
596 found = nilfs_btree_node_lookup(node, key, &index);
600 ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
602 WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
604 ptr = NILFS_BMAP_INVALID_PTR;
606 path[level].bp_index = index;
617 static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
618 struct nilfs_btree_path *path,
619 __u64 *keyp, __u64 *ptrp)
621 struct nilfs_btree_node *node;
623 int index, level, ncmax, ret;
625 node = nilfs_btree_get_root(btree);
626 index = nilfs_btree_node_get_nchildren(node) - 1;
629 level = nilfs_btree_node_get_level(node);
630 ptr = nilfs_btree_node_get_ptr(node, index,
631 NILFS_BTREE_ROOT_NCHILDREN_MAX);
632 path[level].bp_bh = NULL;
633 path[level].bp_index = index;
634 ncmax = nilfs_btree_nchildren_per_block(btree);
636 for (level--; level > 0; level--) {
637 ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
640 node = nilfs_btree_get_nonroot_node(path, level);
641 if (nilfs_btree_bad_node(btree, node, level))
643 index = nilfs_btree_node_get_nchildren(node) - 1;
644 ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
645 path[level].bp_index = index;
649 *keyp = nilfs_btree_node_get_key(node, index);
657 * nilfs_btree_get_next_key - get next valid key from btree path array
658 * @btree: bmap struct of btree
659 * @path: array of nilfs_btree_path struct
660 * @minlevel: start level
661 * @nextkey: place to store the next valid key
663 * Return Value: If a next key was found, 0 is returned. Otherwise,
664 * -ENOENT is returned.
666 static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
667 const struct nilfs_btree_path *path,
668 int minlevel, __u64 *nextkey)
670 struct nilfs_btree_node *node;
671 int maxlevel = nilfs_btree_height(btree) - 1;
672 int index, next_adj, level;
674 /* Next index is already set to bp_index for leaf nodes. */
676 for (level = minlevel; level <= maxlevel; level++) {
677 if (level == maxlevel)
678 node = nilfs_btree_get_root(btree);
680 node = nilfs_btree_get_nonroot_node(path, level);
682 index = path[level].bp_index + next_adj;
683 if (index < nilfs_btree_node_get_nchildren(node)) {
684 /* Next key is in this node */
685 *nextkey = nilfs_btree_node_get_key(node, index);
688 /* For non-leaf nodes, next index is stored at bp_index + 1. */
694 static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
695 __u64 key, int level, __u64 *ptrp)
697 struct nilfs_btree_path *path;
700 path = nilfs_btree_alloc_path();
704 ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);
706 nilfs_btree_free_path(path);
711 static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
712 __u64 key, __u64 *ptrp,
713 unsigned int maxblocks)
715 struct nilfs_btree_path *path;
716 struct nilfs_btree_node *node;
717 struct inode *dat = NULL;
720 int level = NILFS_BTREE_LEVEL_NODE_MIN;
721 int ret, cnt, index, maxlevel, ncmax;
722 struct nilfs_btree_readahead_info p;
724 path = nilfs_btree_alloc_path();
728 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
732 if (NILFS_BMAP_USE_VBN(btree)) {
733 dat = nilfs_bmap_get_dat(btree);
734 ret = nilfs_dat_translate(dat, ptr, &blocknr);
740 if (cnt == maxblocks)
743 maxlevel = nilfs_btree_height(btree) - 1;
744 node = nilfs_btree_get_node(btree, path, level, &ncmax);
745 index = path[level].bp_index + 1;
747 while (index < nilfs_btree_node_get_nchildren(node)) {
748 if (nilfs_btree_node_get_key(node, index) !=
751 ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
753 ret = nilfs_dat_translate(dat, ptr2, &blocknr);
758 if (ptr2 != ptr + cnt || ++cnt == maxblocks)
763 if (level == maxlevel)
766 /* look-up right sibling node */
767 p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
768 p.index = path[level + 1].bp_index + 1;
770 if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
771 nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
773 ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
774 path[level + 1].bp_index = p.index;
776 brelse(path[level].bp_bh);
777 path[level].bp_bh = NULL;
779 ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh,
783 node = nilfs_btree_get_nonroot_node(path, level);
784 ncmax = nilfs_btree_nchildren_per_block(btree);
786 path[level].bp_index = index;
792 nilfs_btree_free_path(path);
796 static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
797 struct nilfs_btree_path *path,
798 int level, __u64 key)
800 if (level < nilfs_btree_height(btree) - 1) {
802 nilfs_btree_node_set_key(
803 nilfs_btree_get_nonroot_node(path, level),
804 path[level].bp_index, key);
805 if (!buffer_dirty(path[level].bp_bh))
806 mark_buffer_dirty(path[level].bp_bh);
807 } while ((path[level].bp_index == 0) &&
808 (++level < nilfs_btree_height(btree) - 1));
812 if (level == nilfs_btree_height(btree) - 1) {
813 nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
814 path[level].bp_index, key);
818 static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
819 struct nilfs_btree_path *path,
820 int level, __u64 *keyp, __u64 *ptrp)
822 struct nilfs_btree_node *node;
825 if (level < nilfs_btree_height(btree) - 1) {
826 node = nilfs_btree_get_nonroot_node(path, level);
827 ncblk = nilfs_btree_nchildren_per_block(btree);
828 nilfs_btree_node_insert(node, path[level].bp_index,
829 *keyp, *ptrp, ncblk);
830 if (!buffer_dirty(path[level].bp_bh))
831 mark_buffer_dirty(path[level].bp_bh);
833 if (path[level].bp_index == 0)
834 nilfs_btree_promote_key(btree, path, level + 1,
835 nilfs_btree_node_get_key(node,
838 node = nilfs_btree_get_root(btree);
839 nilfs_btree_node_insert(node, path[level].bp_index,
841 NILFS_BTREE_ROOT_NCHILDREN_MAX);
845 static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
846 struct nilfs_btree_path *path,
847 int level, __u64 *keyp, __u64 *ptrp)
849 struct nilfs_btree_node *node, *left;
850 int nchildren, lnchildren, n, move, ncblk;
852 node = nilfs_btree_get_nonroot_node(path, level);
853 left = nilfs_btree_get_sib_node(path, level);
854 nchildren = nilfs_btree_node_get_nchildren(node);
855 lnchildren = nilfs_btree_node_get_nchildren(left);
856 ncblk = nilfs_btree_nchildren_per_block(btree);
859 n = (nchildren + lnchildren + 1) / 2 - lnchildren;
860 if (n > path[level].bp_index) {
861 /* move insert point */
866 nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
868 if (!buffer_dirty(path[level].bp_bh))
869 mark_buffer_dirty(path[level].bp_bh);
870 if (!buffer_dirty(path[level].bp_sib_bh))
871 mark_buffer_dirty(path[level].bp_sib_bh);
873 nilfs_btree_promote_key(btree, path, level + 1,
874 nilfs_btree_node_get_key(node, 0));
877 brelse(path[level].bp_bh);
878 path[level].bp_bh = path[level].bp_sib_bh;
879 path[level].bp_sib_bh = NULL;
880 path[level].bp_index += lnchildren;
881 path[level + 1].bp_index--;
883 brelse(path[level].bp_sib_bh);
884 path[level].bp_sib_bh = NULL;
885 path[level].bp_index -= n;
888 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
891 static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
892 struct nilfs_btree_path *path,
893 int level, __u64 *keyp, __u64 *ptrp)
895 struct nilfs_btree_node *node, *right;
896 int nchildren, rnchildren, n, move, ncblk;
898 node = nilfs_btree_get_nonroot_node(path, level);
899 right = nilfs_btree_get_sib_node(path, level);
900 nchildren = nilfs_btree_node_get_nchildren(node);
901 rnchildren = nilfs_btree_node_get_nchildren(right);
902 ncblk = nilfs_btree_nchildren_per_block(btree);
905 n = (nchildren + rnchildren + 1) / 2 - rnchildren;
906 if (n > nchildren - path[level].bp_index) {
907 /* move insert point */
912 nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
914 if (!buffer_dirty(path[level].bp_bh))
915 mark_buffer_dirty(path[level].bp_bh);
916 if (!buffer_dirty(path[level].bp_sib_bh))
917 mark_buffer_dirty(path[level].bp_sib_bh);
919 path[level + 1].bp_index++;
920 nilfs_btree_promote_key(btree, path, level + 1,
921 nilfs_btree_node_get_key(right, 0));
922 path[level + 1].bp_index--;
925 brelse(path[level].bp_bh);
926 path[level].bp_bh = path[level].bp_sib_bh;
927 path[level].bp_sib_bh = NULL;
928 path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
929 path[level + 1].bp_index++;
931 brelse(path[level].bp_sib_bh);
932 path[level].bp_sib_bh = NULL;
935 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
938 static void nilfs_btree_split(struct nilfs_bmap *btree,
939 struct nilfs_btree_path *path,
940 int level, __u64 *keyp, __u64 *ptrp)
942 struct nilfs_btree_node *node, *right;
943 int nchildren, n, move, ncblk;
945 node = nilfs_btree_get_nonroot_node(path, level);
946 right = nilfs_btree_get_sib_node(path, level);
947 nchildren = nilfs_btree_node_get_nchildren(node);
948 ncblk = nilfs_btree_nchildren_per_block(btree);
951 n = (nchildren + 1) / 2;
952 if (n > nchildren - path[level].bp_index) {
957 nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
959 if (!buffer_dirty(path[level].bp_bh))
960 mark_buffer_dirty(path[level].bp_bh);
961 if (!buffer_dirty(path[level].bp_sib_bh))
962 mark_buffer_dirty(path[level].bp_sib_bh);
965 path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
966 nilfs_btree_node_insert(right, path[level].bp_index,
967 *keyp, *ptrp, ncblk);
969 *keyp = nilfs_btree_node_get_key(right, 0);
970 *ptrp = path[level].bp_newreq.bpr_ptr;
972 brelse(path[level].bp_bh);
973 path[level].bp_bh = path[level].bp_sib_bh;
974 path[level].bp_sib_bh = NULL;
976 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
978 *keyp = nilfs_btree_node_get_key(right, 0);
979 *ptrp = path[level].bp_newreq.bpr_ptr;
981 brelse(path[level].bp_sib_bh);
982 path[level].bp_sib_bh = NULL;
985 path[level + 1].bp_index++;
988 static void nilfs_btree_grow(struct nilfs_bmap *btree,
989 struct nilfs_btree_path *path,
990 int level, __u64 *keyp, __u64 *ptrp)
992 struct nilfs_btree_node *root, *child;
995 root = nilfs_btree_get_root(btree);
996 child = nilfs_btree_get_sib_node(path, level);
997 ncblk = nilfs_btree_nchildren_per_block(btree);
999 n = nilfs_btree_node_get_nchildren(root);
1001 nilfs_btree_node_move_right(root, child, n,
1002 NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
1003 nilfs_btree_node_set_level(root, level + 1);
1005 if (!buffer_dirty(path[level].bp_sib_bh))
1006 mark_buffer_dirty(path[level].bp_sib_bh);
1008 path[level].bp_bh = path[level].bp_sib_bh;
1009 path[level].bp_sib_bh = NULL;
1011 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
1013 *keyp = nilfs_btree_node_get_key(child, 0);
1014 *ptrp = path[level].bp_newreq.bpr_ptr;
1017 static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
1018 const struct nilfs_btree_path *path)
1020 struct nilfs_btree_node *node;
1024 return NILFS_BMAP_INVALID_PTR;
1027 level = NILFS_BTREE_LEVEL_NODE_MIN;
1028 if (path[level].bp_index > 0) {
1029 node = nilfs_btree_get_node(btree, path, level, &ncmax);
1030 return nilfs_btree_node_get_ptr(node,
1031 path[level].bp_index - 1,
1036 level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
1037 if (level <= nilfs_btree_height(btree) - 1) {
1038 node = nilfs_btree_get_node(btree, path, level, &ncmax);
1039 return nilfs_btree_node_get_ptr(node, path[level].bp_index,
1043 return NILFS_BMAP_INVALID_PTR;
1046 static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
1047 const struct nilfs_btree_path *path,
1052 ptr = nilfs_bmap_find_target_seq(btree, key);
1053 if (ptr != NILFS_BMAP_INVALID_PTR)
1054 /* sequential access */
1057 ptr = nilfs_btree_find_near(btree, path);
1058 if (ptr != NILFS_BMAP_INVALID_PTR)
1063 return nilfs_bmap_find_target_in_group(btree);
1066 static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
1067 struct nilfs_btree_path *path,
1068 int *levelp, __u64 key, __u64 ptr,
1069 struct nilfs_bmap_stats *stats)
1071 struct buffer_head *bh;
1072 struct nilfs_btree_node *node, *parent, *sib;
1074 int pindex, level, ncmax, ncblk, ret;
1075 struct inode *dat = NULL;
1077 stats->bs_nblocks = 0;
1078 level = NILFS_BTREE_LEVEL_DATA;
1080 /* allocate a new ptr for data block */
1081 if (NILFS_BMAP_USE_VBN(btree)) {
1082 path[level].bp_newreq.bpr_ptr =
1083 nilfs_btree_find_target_v(btree, path, key);
1084 dat = nilfs_bmap_get_dat(btree);
1087 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
1091 ncblk = nilfs_btree_nchildren_per_block(btree);
1093 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
1094 level < nilfs_btree_height(btree) - 1;
1096 node = nilfs_btree_get_nonroot_node(path, level);
1097 if (nilfs_btree_node_get_nchildren(node) < ncblk) {
1098 path[level].bp_op = nilfs_btree_do_insert;
1099 stats->bs_nblocks++;
1103 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1104 pindex = path[level + 1].bp_index;
1108 sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
1110 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1112 goto err_out_child_node;
1113 sib = (struct nilfs_btree_node *)bh->b_data;
1114 if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
1115 path[level].bp_sib_bh = bh;
1116 path[level].bp_op = nilfs_btree_carry_left;
1117 stats->bs_nblocks++;
1125 if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
1126 sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
1128 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1130 goto err_out_child_node;
1131 sib = (struct nilfs_btree_node *)bh->b_data;
1132 if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
1133 path[level].bp_sib_bh = bh;
1134 path[level].bp_op = nilfs_btree_carry_right;
1135 stats->bs_nblocks++;
1143 path[level].bp_newreq.bpr_ptr =
1144 path[level - 1].bp_newreq.bpr_ptr + 1;
1145 ret = nilfs_bmap_prepare_alloc_ptr(btree,
1146 &path[level].bp_newreq, dat);
1148 goto err_out_child_node;
1149 ret = nilfs_btree_get_new_block(btree,
1150 path[level].bp_newreq.bpr_ptr,
1153 goto err_out_curr_node;
1155 stats->bs_nblocks++;
1157 sib = (struct nilfs_btree_node *)bh->b_data;
1158 nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
1159 path[level].bp_sib_bh = bh;
1160 path[level].bp_op = nilfs_btree_split;
1164 node = nilfs_btree_get_root(btree);
1165 if (nilfs_btree_node_get_nchildren(node) <
1166 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1167 path[level].bp_op = nilfs_btree_do_insert;
1168 stats->bs_nblocks++;
1173 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
1174 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
1176 goto err_out_child_node;
1177 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
1180 goto err_out_curr_node;
1182 nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
1183 0, level, 0, ncblk, NULL, NULL);
1184 path[level].bp_sib_bh = bh;
1185 path[level].bp_op = nilfs_btree_grow;
1188 path[level].bp_op = nilfs_btree_do_insert;
1190 /* a newly-created node block and a data block are added */
1191 stats->bs_nblocks += 2;
1200 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1202 for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
1203 nilfs_btnode_delete(path[level].bp_sib_bh);
1204 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1208 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1211 stats->bs_nblocks = 0;
1215 static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
1216 struct nilfs_btree_path *path,
1217 int maxlevel, __u64 key, __u64 ptr)
1219 struct inode *dat = NULL;
1222 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1223 ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
1224 if (NILFS_BMAP_USE_VBN(btree)) {
1225 nilfs_bmap_set_target_v(btree, key, ptr);
1226 dat = nilfs_bmap_get_dat(btree);
1229 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1230 nilfs_bmap_commit_alloc_ptr(btree,
1231 &path[level - 1].bp_newreq, dat);
1232 path[level].bp_op(btree, path, level, &key, &ptr);
1235 if (!nilfs_bmap_dirty(btree))
1236 nilfs_bmap_set_dirty(btree);
1239 static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
1241 struct nilfs_btree_path *path;
1242 struct nilfs_bmap_stats stats;
1245 path = nilfs_btree_alloc_path();
1249 ret = nilfs_btree_do_lookup(btree, path, key, NULL,
1250 NILFS_BTREE_LEVEL_NODE_MIN, 0);
1251 if (ret != -ENOENT) {
1257 ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
1260 nilfs_btree_commit_insert(btree, path, level, key, ptr);
1261 nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
1264 nilfs_btree_free_path(path);
1268 static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
1269 struct nilfs_btree_path *path,
1270 int level, __u64 *keyp, __u64 *ptrp)
1272 struct nilfs_btree_node *node;
1275 if (level < nilfs_btree_height(btree) - 1) {
1276 node = nilfs_btree_get_nonroot_node(path, level);
1277 ncblk = nilfs_btree_nchildren_per_block(btree);
1278 nilfs_btree_node_delete(node, path[level].bp_index,
1280 if (!buffer_dirty(path[level].bp_bh))
1281 mark_buffer_dirty(path[level].bp_bh);
1282 if (path[level].bp_index == 0)
1283 nilfs_btree_promote_key(btree, path, level + 1,
1284 nilfs_btree_node_get_key(node, 0));
1286 node = nilfs_btree_get_root(btree);
1287 nilfs_btree_node_delete(node, path[level].bp_index,
1289 NILFS_BTREE_ROOT_NCHILDREN_MAX);
/*
 * nilfs_btree_borrow_left - delete an entry, then rebalance by moving
 * entries from the left sibling into the current node.
 *
 * The number moved, n, equalizes the two nodes' child counts.  The new
 * minimum key of the current node is promoted to the parent, and
 * bp_index is shifted right by n to keep pointing at the same entry.
 */
1293 static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
1294 struct nilfs_btree_path *path,
1295 int level, __u64 *keyp, __u64 *ptrp)
1297 struct nilfs_btree_node *node, *left;
1298 int nchildren, lnchildren, n, ncblk;
1300 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1302 node = nilfs_btree_get_nonroot_node(path, level);
1303 left = nilfs_btree_get_sib_node(path, level);
1304 nchildren = nilfs_btree_node_get_nchildren(node);
1305 lnchildren = nilfs_btree_node_get_nchildren(left);
1306 ncblk = nilfs_btree_nchildren_per_block(btree);
/* Move enough entries so both nodes end up with about half the total. */
1308 n = (nchildren + lnchildren) / 2 - nchildren;
1310 nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
1312 if (!buffer_dirty(path[level].bp_bh))
1313 mark_buffer_dirty(path[level].bp_bh);
1314 if (!buffer_dirty(path[level].bp_sib_bh))
1315 mark_buffer_dirty(path[level].bp_sib_bh);
1317 nilfs_btree_promote_key(btree, path, level + 1,
1318 nilfs_btree_node_get_key(node, 0));
1320 brelse(path[level].bp_sib_bh);
1321 path[level].bp_sib_bh = NULL;
1322 path[level].bp_index += n;
/*
 * nilfs_btree_borrow_right - delete an entry, then rebalance by moving
 * entries from the right sibling into the current node.
 *
 * The parent index is temporarily bumped so the key promotion updates
 * the right sibling's slot in the parent, then restored.
 */
1325 static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
1326 struct nilfs_btree_path *path,
1327 int level, __u64 *keyp, __u64 *ptrp)
1329 struct nilfs_btree_node *node, *right;
1330 int nchildren, rnchildren, n, ncblk;
1332 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1334 node = nilfs_btree_get_nonroot_node(path, level);
1335 right = nilfs_btree_get_sib_node(path, level);
1336 nchildren = nilfs_btree_node_get_nchildren(node);
1337 rnchildren = nilfs_btree_node_get_nchildren(right);
1338 ncblk = nilfs_btree_nchildren_per_block(btree);
/* Move enough entries so both nodes end up with about half the total. */
1340 n = (nchildren + rnchildren) / 2 - nchildren;
1342 nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
1344 if (!buffer_dirty(path[level].bp_bh))
1345 mark_buffer_dirty(path[level].bp_bh);
1346 if (!buffer_dirty(path[level].bp_sib_bh))
1347 mark_buffer_dirty(path[level].bp_sib_bh);
/* Promote the right sibling's new minimum key into ITS parent slot. */
1349 path[level + 1].bp_index++;
1350 nilfs_btree_promote_key(btree, path, level + 1,
1351 nilfs_btree_node_get_key(right, 0));
1352 path[level + 1].bp_index--;
1354 brelse(path[level].bp_sib_bh);
1355 path[level].bp_sib_bh = NULL;
/*
 * nilfs_btree_concat_left - delete an entry, then merge the current node
 * into its left sibling.
 *
 * All remaining entries are moved left, the emptied node's block is
 * deleted, and the path adopts the sibling's buffer; bp_index is shifted
 * by the sibling's final child count so it indexes the merged node.
 */
1358 static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
1359 struct nilfs_btree_path *path,
1360 int level, __u64 *keyp, __u64 *ptrp)
1362 struct nilfs_btree_node *node, *left;
1365 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1367 node = nilfs_btree_get_nonroot_node(path, level);
1368 left = nilfs_btree_get_sib_node(path, level);
1369 ncblk = nilfs_btree_nchildren_per_block(btree);
1371 n = nilfs_btree_node_get_nchildren(node);
1373 nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
1375 if (!buffer_dirty(path[level].bp_sib_bh))
1376 mark_buffer_dirty(path[level].bp_sib_bh);
/* The current node is now empty; drop its block and keep the sibling. */
1378 nilfs_btnode_delete(path[level].bp_bh);
1379 path[level].bp_bh = path[level].bp_sib_bh;
1380 path[level].bp_sib_bh = NULL;
1381 path[level].bp_index += nilfs_btree_node_get_nchildren(left);
/*
 * nilfs_btree_concat_right - delete an entry, then merge the right
 * sibling into the current node.
 *
 * The right sibling's block is deleted afterwards and the parent index
 * is advanced past the removed sibling's slot.
 */
1384 static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
1385 struct nilfs_btree_path *path,
1386 int level, __u64 *keyp, __u64 *ptrp)
1388 struct nilfs_btree_node *node, *right;
1391 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1393 node = nilfs_btree_get_nonroot_node(path, level);
1394 right = nilfs_btree_get_sib_node(path, level);
1395 ncblk = nilfs_btree_nchildren_per_block(btree);
1397 n = nilfs_btree_node_get_nchildren(right);
1399 nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
1401 if (!buffer_dirty(path[level].bp_bh))
1402 mark_buffer_dirty(path[level].bp_bh);
/* Right sibling is empty now; remove its block and its parent slot. */
1404 nilfs_btnode_delete(path[level].bp_sib_bh);
1405 path[level].bp_sib_bh = NULL;
1406 path[level + 1].bp_index++;
/*
 * nilfs_btree_shrink - reduce tree height by absorbing the root's only
 * child into the root node itself.
 *
 * After deleting the target entry, the root's single pointer is removed,
 * the root's level is lowered to the child's, all the child's entries
 * are moved into the root, and the child's block is freed.
 */
1409 static void nilfs_btree_shrink(struct nilfs_bmap *btree,
1410 struct nilfs_btree_path *path,
1411 int level, __u64 *keyp, __u64 *ptrp)
1413 struct nilfs_btree_node *root, *child;
1416 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1418 root = nilfs_btree_get_root(btree);
1419 child = nilfs_btree_get_nonroot_node(path, level);
1420 ncblk = nilfs_btree_nchildren_per_block(btree);
1422 nilfs_btree_node_delete(root, 0, NULL, NULL,
1423 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1424 nilfs_btree_node_set_level(root, level);
1425 n = nilfs_btree_node_get_nchildren(child);
1426 nilfs_btree_node_move_left(root, child, n,
1427 NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
1429 nilfs_btnode_delete(path[level].bp_bh);
1430 path[level].bp_bh = NULL;
/*
 * nilfs_btree_nop - no-op bp_op placeholder used when a level needs no
 * structural change during a delete.
 */
1433 static void nilfs_btree_nop(struct nilfs_bmap *btree,
1434 struct nilfs_btree_path *path,
1435 int level, __u64 *keyp, __u64 *ptrp)
/*
 * nilfs_btree_prepare_delete - plan a delete, choosing a bp_op for each
 * affected level and pre-releasing the pointers that will go away.
 *
 * Walking up from the leaf level, each node either simply loses an entry
 * (do_delete), borrows from or merges with a sibling, or — at the level
 * just below the root — triggers a height shrink.  stats->bs_nblocks
 * accumulates the number of blocks the commit phase will release.  On
 * error all prepared end_ptr requests are aborted and held sibling
 * buffers released.
 */
1439 static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1440 struct nilfs_btree_path *path,
1442 struct nilfs_bmap_stats *stats,
1445 struct buffer_head *bh;
1446 struct nilfs_btree_node *node, *parent, *sib;
1448 int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
1451 stats->bs_nblocks = 0;
1452 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
1453 ncblk = nilfs_btree_nchildren_per_block(btree);
/* Bottom-up pass over every non-root level on the lookup path. */
1455 for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
1456 level < nilfs_btree_height(btree) - 1;
1458 node = nilfs_btree_get_nonroot_node(path, level);
1459 path[level].bp_oldreq.bpr_ptr =
1460 nilfs_btree_node_get_ptr(node, dindex, ncblk);
1461 ret = nilfs_bmap_prepare_end_ptr(btree,
1462 &path[level].bp_oldreq, dat);
1464 goto err_out_child_node;
/* Node stays above the minimum fill: a plain delete suffices. */
1466 if (nilfs_btree_node_get_nchildren(node) > ncmin) {
1467 path[level].bp_op = nilfs_btree_do_delete;
1468 stats->bs_nblocks++;
/* Underflow: try the left sibling first, then the right. */
1472 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1473 pindex = path[level + 1].bp_index;
1478 sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
1480 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1482 goto err_out_curr_node;
1483 sib = (struct nilfs_btree_node *)bh->b_data;
1484 if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
1485 path[level].bp_sib_bh = bh;
1486 path[level].bp_op = nilfs_btree_borrow_left;
1487 stats->bs_nblocks++;
1490 path[level].bp_sib_bh = bh;
1491 path[level].bp_op = nilfs_btree_concat_left;
1492 stats->bs_nblocks++;
1496 nilfs_btree_node_get_nchildren(parent) - 1) {
1498 sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
1500 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1502 goto err_out_curr_node;
1503 sib = (struct nilfs_btree_node *)bh->b_data;
1504 if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
1505 path[level].bp_sib_bh = bh;
1506 path[level].bp_op = nilfs_btree_borrow_right;
1507 stats->bs_nblocks++;
1510 path[level].bp_sib_bh = bh;
1511 path[level].bp_op = nilfs_btree_concat_right;
1512 stats->bs_nblocks++;
1514 * When merging right sibling node
1515 * into the current node, pointer to
1516 * the right sibling node must be
1517 * terminated instead. The adjustment
1518 * below is required for that.
1520 dindex = pindex + 1;
1525 /* the only child of the root node */
1526 WARN_ON(level != nilfs_btree_height(btree) - 2);
1527 if (nilfs_btree_node_get_nchildren(node) - 1 <=
1528 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
/* Remaining entries fit in the root: shrink releases 2 blocks. */
1529 path[level].bp_op = nilfs_btree_shrink;
1530 stats->bs_nblocks += 2;
1532 path[level].bp_op = nilfs_btree_nop;
1533 goto shrink_root_child;
1535 path[level].bp_op = nilfs_btree_do_delete;
1536 stats->bs_nblocks++;
1542 /* child of the root node is deleted */
1543 path[level].bp_op = nilfs_btree_do_delete;
1544 stats->bs_nblocks++;
1547 node = nilfs_btree_get_root(btree);
1548 path[level].bp_oldreq.bpr_ptr =
1549 nilfs_btree_node_get_ptr(node, dindex,
1550 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1552 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
1554 goto err_out_child_node;
/* Error unwind: abort prepared requests and drop sibling buffers. */
1563 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
1565 for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
1566 brelse(path[level].bp_sib_bh);
1567 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
1570 stats->bs_nblocks = 0;
/*
 * nilfs_btree_commit_delete - execute a prepared delete.
 *
 * For each level up to @maxlevel, commit the pointer release and run the
 * bp_op chosen by nilfs_btree_prepare_delete, then mark the bmap dirty.
 * This phase cannot fail.
 */
1574 static void nilfs_btree_commit_delete(struct nilfs_bmap *btree,
1575 struct nilfs_btree_path *path,
1576 int maxlevel, struct inode *dat)
1580 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1581 nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat);
1582 path[level].bp_op(btree, path, level, NULL, NULL);
1585 if (!nilfs_bmap_dirty(btree))
1586 nilfs_bmap_set_dirty(btree);
/*
 * nilfs_btree_delete - delete the entry for @key from the b-tree.
 *
 * Looks the key up, plans the delete (prepare), applies it (commit), and
 * lowers the inode's block count by the number of blocks released.  The
 * DAT inode is only involved for virtual-block-number bmaps.
 */
1589 static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
1592 struct nilfs_btree_path *path;
1593 struct nilfs_bmap_stats stats;
1597 path = nilfs_btree_alloc_path();
1601 ret = nilfs_btree_do_lookup(btree, path, key, NULL,
1602 NILFS_BTREE_LEVEL_NODE_MIN, 0);
1607 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
1609 ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
1612 nilfs_btree_commit_delete(btree, path, level, dat);
1613 nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks);
1616 nilfs_btree_free_path(path);
/*
 * nilfs_btree_seek_key - find the first mapped key at or after @start.
 *
 * An exact hit returns immediately; on -ENOENT the next key after the
 * lookup position is returned via @keyp instead.
 */
1620 static int nilfs_btree_seek_key(const struct nilfs_bmap *btree, __u64 start,
1623 struct nilfs_btree_path *path;
1624 const int minlevel = NILFS_BTREE_LEVEL_NODE_MIN;
1627 path = nilfs_btree_alloc_path();
1631 ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0);
1634 else if (ret == -ENOENT)
1635 ret = nilfs_btree_get_next_key(btree, path, minlevel, keyp);
1637 nilfs_btree_free_path(path);
/*
 * nilfs_btree_last_key - return the largest key stored in the b-tree
 * via @keyp.
 */
1641 static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
1643 struct nilfs_btree_path *path;
1646 path = nilfs_btree_alloc_path();
1650 ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
1652 nilfs_btree_free_path(path);
/*
 * nilfs_btree_check_delete - test whether deleting @key would let the
 * bmap convert back to the small (direct) mapping.
 *
 * Returns true when @key is the current maximum key and the next-largest
 * key (0 if only one entry remains) is below NILFS_BMAP_LARGE_LOW.  For
 * a height-2 tree the rightmost child node is read to find those keys.
 */
1657 static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
1659 struct buffer_head *bh;
1660 struct nilfs_btree_node *root, *node;
1661 __u64 maxkey, nextmaxkey;
1665 root = nilfs_btree_get_root(btree);
1666 switch (nilfs_btree_height(btree)) {
1672 nchildren = nilfs_btree_node_get_nchildren(root);
/* Height 2: descend into the last (rightmost) child of the root. */
1675 ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
1676 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1677 ret = nilfs_btree_get_block(btree, ptr, &bh);
1680 node = (struct nilfs_btree_node *)bh->b_data;
1686 nchildren = nilfs_btree_node_get_nchildren(node);
1687 maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
1688 nextmaxkey = (nchildren > 1) ?
1689 nilfs_btree_node_get_key(node, nchildren - 2) : 0;
1693 return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
/*
 * nilfs_btree_gather_data - copy up to @nitems key/pointer pairs out of
 * the tree into the @keys/@ptrs arrays (used when converting back to a
 * direct mapping).
 *
 * For a height-1 tree the pairs come from the root itself; for height 2
 * the root is expected to have a single child node, which is read and
 * used as the source.  Values are converted from on-disk little-endian.
 */
1696 static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
1697 __u64 *keys, __u64 *ptrs, int nitems)
1699 struct buffer_head *bh;
1700 struct nilfs_btree_node *node, *root;
1704 int nchildren, ncmax, i, ret;
1706 root = nilfs_btree_get_root(btree);
1707 switch (nilfs_btree_height(btree)) {
1711 ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX;
1714 nchildren = nilfs_btree_node_get_nchildren(root);
/* Conversion only makes sense when the root has one child left. */
1715 WARN_ON(nchildren > 1);
1716 ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
1717 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1718 ret = nilfs_btree_get_block(btree, ptr, &bh);
1721 node = (struct nilfs_btree_node *)bh->b_data;
1722 ncmax = nilfs_btree_nchildren_per_block(btree);
1729 nchildren = nilfs_btree_node_get_nchildren(node);
1730 if (nchildren < nitems)
1732 dkeys = nilfs_btree_node_dkeys(node);
1733 dptrs = nilfs_btree_node_dptrs(node, ncmax);
1734 for (i = 0; i < nitems; i++) {
1735 keys[i] = le64_to_cpu(dkeys[i]);
1736 ptrs[i] = le64_to_cpu(dptrs[i]);
/*
 * nilfs_btree_prepare_convert_and_insert - reserve the pointers and the
 * node block needed to convert a direct bmap into a b-tree.
 *
 * @dreq is always allocated (pointer for the inserted data block); @nreq
 * and *@bhp are only used when a separate child node block is required,
 * in which case two blocks are counted in @stats.  On failure every
 * reservation made so far is rolled back.
 */
1746 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
1747 union nilfs_bmap_ptr_req *dreq,
1748 union nilfs_bmap_ptr_req *nreq,
1749 struct buffer_head **bhp,
1750 struct nilfs_bmap_stats *stats)
1752 struct buffer_head *bh;
1753 struct inode *dat = NULL;
1756 stats->bs_nblocks = 0;
1759 /* cannot find near ptr */
1760 if (NILFS_BMAP_USE_VBN(btree)) {
1761 dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
1762 dat = nilfs_bmap_get_dat(btree);
1765 ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
1769 ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
1774 stats->bs_nblocks++;
/* A child node block is also needed: reserve its pointer and buffer. */
1776 nreq->bpr_ptr = dreq->bpr_ptr + 1;
1777 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
1781 ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
1786 stats->bs_nblocks++;
/* Error unwind: undo the allocations in reverse order. */
1794 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
1796 nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
1797 stats->bs_nblocks = 0;
/*
 * nilfs_btree_commit_convert_and_insert - build the new b-tree from the
 * @n key/pointer pairs gathered from the old direct mapping, plus the
 * new (@key, dreq->bpr_ptr) entry.
 *
 * With @nreq set, a level-1 child node is written into @bh and a
 * level-2 root pointing at it is built; otherwise everything fits in a
 * level-1 root.  @ptr carries the new block's buffer_head cast to an
 * integer and is re-cast below to mark it volatile.
 */
1803 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
1804 __u64 key, __u64 ptr,
1805 const __u64 *keys, const __u64 *ptrs,
1807 union nilfs_bmap_ptr_req *dreq,
1808 union nilfs_bmap_ptr_req *nreq,
1809 struct buffer_head *bh)
1811 struct nilfs_btree_node *node;
1816 /* free resources */
1817 if (btree->b_ops->bop_clear != NULL)
1818 btree->b_ops->bop_clear(btree);
1820 /* ptr must be a pointer to a buffer head. */
1821 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1823 /* convert and insert */
1824 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
1825 __nilfs_btree_init(btree);
1827 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
1828 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
1830 /* create child node at level 1 */
1831 node = (struct nilfs_btree_node *)bh->b_data;
1832 ncblk = nilfs_btree_nchildren_per_block(btree);
1833 nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
1834 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
1835 if (!buffer_dirty(bh))
1836 mark_buffer_dirty(bh);
1837 if (!nilfs_bmap_dirty(btree))
1838 nilfs_bmap_set_dirty(btree);
1842 /* create root node at level 2 */
1843 node = nilfs_btree_get_root(btree);
1844 tmpptr = nreq->bpr_ptr;
1845 nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1,
1846 NILFS_BTREE_ROOT_NCHILDREN_MAX,
1849 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
1851 /* create root node at level 1 */
1852 node = nilfs_btree_get_root(btree);
1853 nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n,
1854 NILFS_BTREE_ROOT_NCHILDREN_MAX,
1856 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
1857 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1858 if (!nilfs_bmap_dirty(btree))
1859 nilfs_bmap_set_dirty(btree);
1862 if (NILFS_BMAP_USE_VBN(btree))
1863 nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
1867 * nilfs_btree_convert_and_insert -
 *
 * Convert a direct bmap into a b-tree and insert (@key, @ptr), seeding
 * the tree with the @n existing pairs in @keys/@ptrs.  If the combined
 * n + 1 entries fit in the root node, no child node is reserved (ni is
 * left unused); otherwise a level-1 child block is allocated as well.
 * On success the inode's block count grows by stats.bs_nblocks.
 */
1875 int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
1876 __u64 key, __u64 ptr,
1877 const __u64 *keys, const __u64 *ptrs, int n)
1879 struct buffer_head *bh = NULL;
1880 union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
1881 struct nilfs_bmap_stats stats;
/* Decide whether n + 1 entries fit in the root or need a child node. */
1884 if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1887 } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1888 nilfs_btree_node_size(btree))) {
1897 ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh,
1901 nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
1903 nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
/*
 * nilfs_btree_propagate_p - propagate dirtiness up the path for bmaps
 * using physical block numbers.
 *
 * Marks each ancestor node buffer dirty, stopping at the first one that
 * is already dirty (everything above it was dirtied earlier) or at the
 * root level.
 */
1907 static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
1908 struct nilfs_btree_path *path,
1910 struct buffer_head *bh)
1912 while ((++level < nilfs_btree_height(btree) - 1) &&
1913 !buffer_dirty(path[level].bp_bh))
1914 mark_buffer_dirty(path[level].bp_bh);
/*
 * nilfs_btree_prepare_update_v - prepare a virtual-pointer update for
 * the node at @level.
 *
 * Reads the current pointer from the parent, requests a new DAT entry
 * (old ptr + 1), and, for btnode buffers, prepares the node-cache key
 * change.  The DAT update is aborted if the key change preparation
 * fails.
 */
1919 static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
1920 struct nilfs_btree_path *path,
1921 int level, struct inode *dat)
1923 struct nilfs_btree_node *parent;
1926 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1927 path[level].bp_oldreq.bpr_ptr =
1928 nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
1930 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
1931 ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
1932 &path[level].bp_newreq.bpr_req);
/* btnode buffers are indexed by ptr; stage the cache re-key as well. */
1936 if (buffer_nilfs_node(path[level].bp_bh)) {
1937 path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
1938 path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
1939 path[level].bp_ctxt.bh = path[level].bp_bh;
1940 ret = nilfs_btnode_prepare_change_key(
1941 NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1942 &path[level].bp_ctxt);
1944 nilfs_dat_abort_update(dat,
1945 &path[level].bp_oldreq.bpr_req,
1946 &path[level].bp_newreq.bpr_req);
/*
 * nilfs_btree_commit_update_v - commit a prepared virtual-pointer
 * update: finalize the DAT change, complete the btnode cache re-key if
 * one was staged, mark the buffer volatile, and write the new pointer
 * into the parent node.
 */
1954 static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
1955 struct nilfs_btree_path *path,
1956 int level, struct inode *dat)
1958 struct nilfs_btree_node *parent;
1961 nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
1962 &path[level].bp_newreq.bpr_req,
1963 btree->b_ptr_type == NILFS_BMAP_PTR_VS);
1965 if (buffer_nilfs_node(path[level].bp_bh)) {
1966 nilfs_btnode_commit_change_key(
1967 NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1968 &path[level].bp_ctxt);
/* The re-key may have substituted a new buffer head. */
1969 path[level].bp_bh = path[level].bp_ctxt.bh;
1971 set_buffer_nilfs_volatile(path[level].bp_bh);
1973 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1974 nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index,
1975 path[level].bp_newreq.bpr_ptr, ncmax);
/*
 * nilfs_btree_abort_update_v - roll back a prepared virtual-pointer
 * update: cancel the DAT change and, for btnode buffers, the staged
 * cache re-key.
 */
1978 static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
1979 struct nilfs_btree_path *path,
1980 int level, struct inode *dat)
1982 nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
1983 &path[level].bp_newreq.bpr_req);
1984 if (buffer_nilfs_node(path[level].bp_bh))
1985 nilfs_btnode_abort_change_key(
1986 NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1987 &path[level].bp_ctxt);
/*
 * nilfs_btree_prepare_propagate_v - prepare pointer updates from
 * @minlevel upwards.
 *
 * The starting level is prepared unless its buffer is already volatile;
 * ancestors are then prepared until one is found already dirty (or the
 * root is reached).  *@maxlevelp receives the highest prepared level.
 * On error, already-prepared levels are aborted in reverse.
 */
1990 static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree,
1991 struct nilfs_btree_path *path,
1992 int minlevel, int *maxlevelp,
1998 if (!buffer_nilfs_volatile(path[level].bp_bh)) {
1999 ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
2003 while ((++level < nilfs_btree_height(btree) - 1) &&
2004 !buffer_dirty(path[level].bp_bh)) {
2006 WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
2007 ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
2013 *maxlevelp = level - 1;
/* Error unwind: abort prepared levels, bottom level conditionally. */
2018 while (--level > minlevel)
2019 nilfs_btree_abort_update_v(btree, path, level, dat);
2020 if (!buffer_nilfs_volatile(path[level].bp_bh))
2021 nilfs_btree_abort_update_v(btree, path, level, dat);
/*
 * nilfs_btree_commit_propagate_v - commit the pointer updates prepared
 * between @minlevel and @maxlevel (the bottom level only if its buffer
 * was not already volatile, mirroring the prepare phase).
 */
2025 static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree,
2026 struct nilfs_btree_path *path,
2027 int minlevel, int maxlevel,
2028 struct buffer_head *bh,
2033 if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
2034 nilfs_btree_commit_update_v(btree, path, minlevel, dat);
2036 for (level = minlevel + 1; level <= maxlevel; level++)
2037 nilfs_btree_commit_update_v(btree, path, level, dat);
/*
 * nilfs_btree_propagate_v - propagate a dirty buffer for bmaps using
 * virtual block numbers.
 *
 * Prepares and commits pointer updates up the path; if the buffer was
 * already volatile its existing DAT entry is simply marked dirty via
 * the pointer stored in the parent.  The path slot borrows @bh and is
 * cleared again before returning.
 */
2040 static int nilfs_btree_propagate_v(struct nilfs_bmap *btree,
2041 struct nilfs_btree_path *path,
2042 int level, struct buffer_head *bh)
2044 int maxlevel = 0, ret;
2045 struct nilfs_btree_node *parent;
2046 struct inode *dat = nilfs_bmap_get_dat(btree);
2051 path[level].bp_bh = bh;
2052 ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
2057 if (buffer_nilfs_volatile(path[level].bp_bh)) {
2058 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
2059 ptr = nilfs_btree_node_get_ptr(parent,
2060 path[level + 1].bp_index,
2062 ret = nilfs_dat_mark_dirty(dat, ptr);
2067 nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);
/* Release the borrowed reference and restore the path slot. */
2070 brelse(path[level].bp_bh);
2071 path[level].bp_bh = NULL;
/*
 * nilfs_btree_propagate - propagate dirtiness of @bh through the tree.
 *
 * Determines the buffer's key and level (a btnode carries both in its
 * header; a data buffer is level DATA with a key from the bmap), looks
 * up its parent chain, and dispatches to the virtual- or physical-
 * pointer variant.  A lookup miss for a buffer being written indicates
 * b-tree corruption and is logged at KERN_CRIT.
 */
2075 static int nilfs_btree_propagate(struct nilfs_bmap *btree,
2076 struct buffer_head *bh)
2078 struct nilfs_btree_path *path;
2079 struct nilfs_btree_node *node;
2083 WARN_ON(!buffer_dirty(bh));
2085 path = nilfs_btree_alloc_path();
2089 if (buffer_nilfs_node(bh)) {
2090 node = (struct nilfs_btree_node *)bh->b_data;
2091 key = nilfs_btree_node_get_key(node, 0);
2092 level = nilfs_btree_node_get_level(node);
2094 key = nilfs_bmap_data_get_key(btree, bh);
2095 level = NILFS_BTREE_LEVEL_DATA;
2098 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
2100 if (unlikely(ret == -ENOENT))
2101 nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
2102 "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
2103 btree->b_inode->i_ino,
2104 (unsigned long long)key, level);
2108 ret = NILFS_BMAP_USE_VBN(btree) ?
2109 nilfs_btree_propagate_v(btree, path, level, bh) :
2110 nilfs_btree_propagate_p(btree, path, level, bh);
2113 nilfs_btree_free_path(path);
/*
 * nilfs_btree_propagate_gc - GC variant of propagate: simply mark the
 * buffer's DAT entry (keyed by its block number) dirty.
 */
2118 static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree,
2119 struct buffer_head *bh)
2121 return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr);
/*
 * nilfs_btree_add_dirty_buffer - insert @bh into the per-level list in
 * @lists, kept sorted by each node's first key.
 *
 * Buffers whose node header reports a level outside the valid node
 * range are rejected with a warning (likely on-disk corruption).
 */
2124 static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
2125 struct list_head *lists,
2126 struct buffer_head *bh)
2128 struct list_head *head;
2129 struct buffer_head *cbh;
2130 struct nilfs_btree_node *node, *cnode;
2135 node = (struct nilfs_btree_node *)bh->b_data;
2136 key = nilfs_btree_node_get_key(node, 0);
2137 level = nilfs_btree_node_get_level(node);
2138 if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
2139 level >= NILFS_BTREE_LEVEL_MAX) {
2141 nilfs_msg(btree->b_inode->i_sb, KERN_WARNING,
2142 "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
2143 level, (unsigned long long)key,
2144 btree->b_inode->i_ino,
2145 (unsigned long long)bh->b_blocknr);
/* Walk the level's list to find the first entry with a larger key. */
2149 list_for_each(head, &lists[level]) {
2150 cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
2151 cnode = (struct nilfs_btree_node *)cbh->b_data;
2152 ckey = nilfs_btree_node_get_key(cnode, 0);
2156 list_add_tail(&bh->b_assoc_buffers, head);
/*
 * nilfs_btree_lookup_dirty_buffers - collect all dirty btnode buffers
 * onto @listp, ordered by tree level (leaf-most first) and, within a
 * level, by first key.
 *
 * Scans the btnode cache pages tagged dirty, buckets each dirty buffer
 * via nilfs_btree_add_dirty_buffer, then splices the per-level lists
 * in ascending level order.
 */
2159 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
2160 struct list_head *listp)
2162 struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
2163 struct address_space *btcache = btnc_inode->i_mapping;
2164 struct list_head lists[NILFS_BTREE_LEVEL_MAX];
2165 struct pagevec pvec;
2166 struct buffer_head *bh, *head;
2170 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
2171 level < NILFS_BTREE_LEVEL_MAX;
2173 INIT_LIST_HEAD(&lists[level]);
2175 pagevec_init(&pvec, 0);
2177 while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
2179 for (i = 0; i < pagevec_count(&pvec); i++) {
/* Walk the circular buffer list of each dirty page. */
2180 bh = head = page_buffers(pvec.pages[i]);
2182 if (buffer_dirty(bh))
2183 nilfs_btree_add_dirty_buffer(btree,
2185 } while ((bh = bh->b_this_page) != head);
2187 pagevec_release(&pvec);
2191 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
2192 level < NILFS_BTREE_LEVEL_MAX;
2194 list_splice_tail(&lists[level], listp);
/*
 * nilfs_btree_assign_p - assign a real disk location for physical-
 * pointer bmaps.
 *
 * Re-keys the btnode cache entry from its old pointer to @blocknr (for
 * node buffers), stores @blocknr in the parent slot, and fills in the
 * on-disk bi_dat part of @binfo with the parent key and level.
 */
2197 static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
2198 struct nilfs_btree_path *path,
2200 struct buffer_head **bh,
2202 union nilfs_binfo *binfo)
2204 struct nilfs_btree_node *parent;
2209 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
2210 ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
2212 if (buffer_nilfs_node(*bh)) {
2213 path[level].bp_ctxt.oldkey = ptr;
2214 path[level].bp_ctxt.newkey = blocknr;
2215 path[level].bp_ctxt.bh = *bh;
2216 ret = nilfs_btnode_prepare_change_key(
2217 NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
2218 &path[level].bp_ctxt);
2221 nilfs_btnode_commit_change_key(
2222 NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
2223 &path[level].bp_ctxt);
/* The re-key may have substituted a new buffer head. */
2224 *bh = path[level].bp_ctxt.bh;
2227 nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr,
2230 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
2231 /* on-disk format */
2232 binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
2233 binfo->bi_dat.bi_level = level;
/*
 * nilfs_btree_assign_v - assign a real disk location for virtual-
 * pointer bmaps.
 *
 * Registers @blocknr in the DAT for the buffer's virtual block number
 * (taken from the parent slot) and fills in the on-disk bi_v part of
 * @binfo with that vblocknr and the parent key.
 */
2238 static int nilfs_btree_assign_v(struct nilfs_bmap *btree,
2239 struct nilfs_btree_path *path,
2241 struct buffer_head **bh,
2243 union nilfs_binfo *binfo)
2245 struct nilfs_btree_node *parent;
2246 struct inode *dat = nilfs_bmap_get_dat(btree);
2249 union nilfs_bmap_ptr_req req;
2252 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
2253 ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
2256 ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
2259 nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
2261 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
2262 /* on-disk format */
2263 binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
2264 binfo->bi_v.bi_blkoff = cpu_to_le64(key);
/*
 * nilfs_btree_assign - assign the final disk block for buffer *@bh and
 * record the block info needed for the segment summary.
 *
 * Mirrors nilfs_btree_propagate's key/level detection, then dispatches
 * to the virtual- or physical-pointer assign routine.  The buffer must
 * be present in the tree (-ENOENT triggers a WARN).
 */
2269 static int nilfs_btree_assign(struct nilfs_bmap *btree,
2270 struct buffer_head **bh,
2272 union nilfs_binfo *binfo)
2274 struct nilfs_btree_path *path;
2275 struct nilfs_btree_node *node;
2279 path = nilfs_btree_alloc_path();
2283 if (buffer_nilfs_node(*bh)) {
2284 node = (struct nilfs_btree_node *)(*bh)->b_data;
2285 key = nilfs_btree_node_get_key(node, 0);
2286 level = nilfs_btree_node_get_level(node);
2288 key = nilfs_bmap_data_get_key(btree, *bh);
2289 level = NILFS_BTREE_LEVEL_DATA;
2292 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
2294 WARN_ON(ret == -ENOENT);
2298 ret = NILFS_BMAP_USE_VBN(btree) ?
2299 nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
2300 nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
2303 nilfs_btree_free_path(path);
/*
 * nilfs_btree_assign_gc - GC variant of assign: move the DAT entry for
 * the buffer's current block number to the new location, then record
 * the vblocknr/key pair in the on-disk bi_v part of @binfo.
 */
2308 static int nilfs_btree_assign_gc(struct nilfs_bmap *btree,
2309 struct buffer_head **bh,
2311 union nilfs_binfo *binfo)
2313 struct nilfs_btree_node *node;
2317 ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr,
2322 if (buffer_nilfs_node(*bh)) {
2323 node = (struct nilfs_btree_node *)(*bh)->b_data;
2324 key = nilfs_btree_node_get_key(node, 0);
2326 key = nilfs_bmap_data_get_key(btree, *bh);
2328 /* on-disk format */
2329 binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
2330 binfo->bi_v.bi_blkoff = cpu_to_le64(key);
/*
 * nilfs_btree_mark - mark the block holding @key at @level dirty so it
 * gets written in the next segment.
 *
 * Looks up the pointer one level above, reads the block, and dirties
 * its buffer and the bmap.  -ENOENT from either step triggers a WARN
 * (the block is expected to exist).
 */
2335 static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
2337 struct buffer_head *bh;
2338 struct nilfs_btree_path *path;
2342 path = nilfs_btree_alloc_path();
2346 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0);
2348 WARN_ON(ret == -ENOENT);
2351 ret = nilfs_btree_get_block(btree, ptr, &bh);
2353 WARN_ON(ret == -ENOENT);
2357 if (!buffer_dirty(bh))
2358 mark_buffer_dirty(bh);
2360 if (!nilfs_bmap_dirty(btree))
2361 nilfs_bmap_set_dirty(btree);
2364 nilfs_btree_free_path(path);
/* Operation table for a normal (read/write) b-tree bmap. */
2368 static const struct nilfs_bmap_operations nilfs_btree_ops = {
2369 .bop_lookup = nilfs_btree_lookup,
2370 .bop_lookup_contig = nilfs_btree_lookup_contig,
2371 .bop_insert = nilfs_btree_insert,
2372 .bop_delete = nilfs_btree_delete,
2375 .bop_propagate = nilfs_btree_propagate,
2377 .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
2379 .bop_assign = nilfs_btree_assign,
2380 .bop_mark = nilfs_btree_mark,
2382 .bop_seek_key = nilfs_btree_seek_key,
2383 .bop_last_key = nilfs_btree_last_key,
2385 .bop_check_insert = NULL,
2386 .bop_check_delete = nilfs_btree_check_delete,
2387 .bop_gather_data = nilfs_btree_gather_data,
/*
 * Operation table used during garbage collection: only propagate,
 * dirty-buffer lookup, and assign are needed; everything else is NULL.
 */
2390 static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
2392 .bop_lookup_contig = NULL,
2397 .bop_propagate = nilfs_btree_propagate_gc,
2399 .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
2401 .bop_assign = nilfs_btree_assign_gc,
2404 .bop_seek_key = NULL,
2405 .bop_last_key = NULL,
2407 .bop_check_insert = NULL,
2408 .bop_check_delete = NULL,
2409 .bop_gather_data = NULL,
/*
 * __nilfs_btree_init - install the b-tree operation table and per-block
 * child capacity on @bmap (no validation; see nilfs_btree_init).
 */
2412 static void __nilfs_btree_init(struct nilfs_bmap *bmap)
2414 bmap->b_ops = &nilfs_btree_ops;
2415 bmap->b_nchildren_per_block =
2416 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
/*
 * nilfs_btree_init - initialize @bmap as a b-tree: install the ops,
 * validate the on-disk root node, and attach the btnode cache.
 */
2419 int nilfs_btree_init(struct nilfs_bmap *bmap)
2423 __nilfs_btree_init(bmap);
2425 if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
2428 ret = nilfs_attach_btree_node_cache(
2429 &NILFS_BMAP_I(bmap)->vfs_inode);
2434 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
2436 bmap->b_ops = &nilfs_btree_ops_gc;
2437 bmap->b_nchildren_per_block =
2438 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));