// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_btree.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_alloc.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"
#include "xfs_ag.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_health.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
/*
 * Btree magic numbers.
 */
uint32_t
xfs_btree_magic(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{
	int				idx = xfs_has_crc(mp) ? 1 : 0;
	__be32				magic = ops->buf_ops->magic[idx];

	/* Ensure we asked for crc for crc-only magics. */
	ASSERT(magic != 0);
	return be32_to_cpu(magic);
}

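#if 0
/*
 * Illustration only, not part of the original file: buf_ops->magic[]
 * stores the non-CRC magic at index 0 and the CRC magic at index 1, so
 * for the by-block free space btree this returns XFS_ABTB_CRC_MAGIC on
 * a V5 (CRC) filesystem and XFS_ABTB_MAGIC otherwise:
 */
	uint32_t	magic = xfs_btree_magic(mp, &xfs_bnobt_ops);
#endif
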
/*
 * These sibling pointer checks are optimised for null sibling pointers. This
 * happens a lot, and we don't need to byte swap at runtime if the sibling
 * pointer is NULL.
 *
 * These are explicitly marked as inline because the cost of calling them as
 * functions instead of inlining them is about 36 bytes extra code per call site
 * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
 * two sibling check functions reduces the compiled code size by over 300
 * bytes.
 */
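#if 0
/*
 * Illustration only: because NULLFSBLOCK is a compile-time constant,
 * cpu_to_be64(NULLFSBLOCK) folds to a constant as well, so the common
 * "no sibling" test is a single 64-bit compare of the raw on-disk value
 * and needs no runtime byte swap:
 */
static inline bool example_sibling_is_null(__be64 dsibling)
{
	return dsibling == cpu_to_be64(NULLFSBLOCK);	/* folded at build time */
}
#endif
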
static inline xfs_failaddr_t
xfs_btree_check_fsblock_siblings(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsb,
	__be64			dsibling)
{
	xfs_fsblock_t		sibling;

	if (dsibling == cpu_to_be64(NULLFSBLOCK))
		return NULL;

	sibling = be64_to_cpu(dsibling);
	if (sibling == fsb)
		return __this_address;
	if (!xfs_verify_fsbno(mp, sibling))
		return __this_address;
	return NULL;
}

static inline xfs_failaddr_t
xfs_btree_check_memblock_siblings(
	struct xfs_buftarg	*btp,
	xfbno_t			bno,
	__be64			dsibling)
{
	xfbno_t			sibling;

	if (dsibling == cpu_to_be64(NULLFSBLOCK))
		return NULL;

	sibling = be64_to_cpu(dsibling);
	if (sibling == bno)
		return __this_address;
	if (!xmbuf_verify_daddr(btp, xfbno_to_daddr(sibling)))
		return __this_address;
	return NULL;
}

static inline xfs_failaddr_t
xfs_btree_check_agblock_siblings(
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	__be32			dsibling)
{
	xfs_agblock_t		sibling;

	if (dsibling == cpu_to_be32(NULLAGBLOCK))
		return NULL;

	sibling = be32_to_cpu(dsibling);
	if (sibling == agbno)
		return __this_address;
	if (!xfs_verify_agbno(pag, sibling))
		return __this_address;
	return NULL;
}

static xfs_failaddr_t
__xfs_btree_check_lblock_hdr(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;

	if (xfs_has_crc(mp)) {
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.l.bb_blkno !=
		    cpu_to_be64(bp ? xfs_buf_daddr(bp) : XFS_BUF_DADDR_NULL))
			return __this_address;
		if (block->bb_u.l.bb_pad != cpu_to_be32(0))
			return __this_address;
	}

	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;

	return NULL;
}

/*
 * Check a long btree block header. Return the address of the failing check,
 * or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_fsblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_failaddr_t		fa;
	xfs_fsblock_t		fsb;

	fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
	if (fa)
		return fa;

	/*
	 * For inode-rooted btrees, the root block sits in the inode fork. In
	 * that case bp is NULL, and the block must not have any siblings.
	 */
	if (!bp) {
		if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK))
			return __this_address;
		if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK))
			return __this_address;
		return NULL;
	}

	fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	fa = xfs_btree_check_fsblock_siblings(mp, fsb,
			block->bb_u.l.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_fsblock_siblings(mp, fsb,
				block->bb_u.l.bb_rightsib);
	return fa;
}

/*
 * Check an in-memory btree block header. Return the address of the failing
 * check, or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_memblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_buftarg	*btp = cur->bc_mem.xfbtree->target;
	xfs_failaddr_t		fa;
	xfbno_t			bno;

	fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
	if (fa)
		return fa;

	bno = xfs_daddr_to_xfbno(xfs_buf_daddr(bp));
	fa = xfs_btree_check_memblock_siblings(btp, bno,
			block->bb_u.l.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_memblock_siblings(btp, bno,
				block->bb_u.l.bb_rightsib);
	return fa;
}

/*
 * Check a short btree block header. Return the address of the failing check,
 * or NULL if everything is ok.
 */
static xfs_failaddr_t
__xfs_btree_check_agblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	xfs_failaddr_t		fa;
	xfs_agblock_t		agbno;

	if (xfs_has_crc(mp)) {
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp)))
			return __this_address;
	}

	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;

	agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
	fa = xfs_btree_check_agblock_siblings(pag, agbno,
			block->bb_u.s.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_agblock_siblings(pag, agbno,
				block->bb_u.s.bb_rightsib);
	return fa;
}

/*
 * Internal btree block check.
 *
 * Return NULL if the block is ok or the address of the failed check otherwise.
 */
xfs_failaddr_t
__xfs_btree_check_block(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_MEM:
		return __xfs_btree_check_memblock(cur, block, level, bp);
	case XFS_BTREE_TYPE_AG:
		return __xfs_btree_check_agblock(cur, block, level, bp);
	case XFS_BTREE_TYPE_INODE:
		return __xfs_btree_check_fsblock(cur, block, level, bp);
	default:
		ASSERT(0);
		return __this_address;
	}
}

static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN)
		return XFS_ERRTAG_BTREE_CHECK_SBLOCK;
	return XFS_ERRTAG_BTREE_CHECK_LBLOCK;
}

/*
 * Debug routine: check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block, if any */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_failaddr_t		fa;

	fa = __xfs_btree_check_block(cur, block, level, bp);
	if (XFS_IS_CORRUPT(mp, fa != NULL) ||
	    XFS_TEST_ERROR(false, mp, xfs_btree_block_errtag(cur))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
	return 0;
}

int
__xfs_btree_check_ptr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				index,
	int				level)
{
	if (level <= 0)
		return -EFSCORRUPTED;

	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_MEM:
		if (!xfbtree_verify_bno(cur->bc_mem.xfbtree,
				be64_to_cpu((&ptr->l)[index])))
			return -EFSCORRUPTED;
		break;
	case XFS_BTREE_TYPE_INODE:
		if (!xfs_verify_fsbno(cur->bc_mp,
				be64_to_cpu((&ptr->l)[index])))
			return -EFSCORRUPTED;
		break;
	case XFS_BTREE_TYPE_AG:
		if (!xfs_verify_agbno(cur->bc_ag.pag,
				be32_to_cpu((&ptr->s)[index])))
			return -EFSCORRUPTED;
		break;
	}

	return 0;
}

/*
 * Check that a given (indexed) btree pointer at a certain level of a
 * btree is valid and doesn't point past where it should.
 */
static int
xfs_btree_check_ptr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				index,
	int				level)
{
	int				error;

	error = __xfs_btree_check_ptr(cur, ptr, index, level);
	if (error) {
		switch (cur->bc_ops->type) {
		case XFS_BTREE_TYPE_MEM:
			xfs_err(cur->bc_mp,
"In-memory: Corrupt %sbt flags 0x%x pointer at level %d index %d fa %pS.",
				cur->bc_ops->name, cur->bc_flags, level, index,
				__this_address);
			break;
		case XFS_BTREE_TYPE_INODE:
			xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt %sbt pointer at level %d index %d.",
				cur->bc_ino.ip->i_ino,
				cur->bc_ino.whichfork, cur->bc_ops->name,
				level, index);
			break;
		case XFS_BTREE_TYPE_AG:
			xfs_err(cur->bc_mp,
"AG %u: Corrupt %sbt pointer at level %d index %d.",
				cur->bc_ag.pag->pag_agno, cur->bc_ops->name,
				level, index);
			break;
		}
		xfs_btree_mark_sick(cur);
	}

	return error;
}

#ifdef DEBUG
# define xfs_btree_debug_check_ptr	xfs_btree_check_ptr
#else
# define xfs_btree_debug_check_ptr(...)	(0)
#endif

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * long-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
 * it into the buffer so recovery knows what the last modification was that made
 * it to disk.
 */
void
xfs_btree_fsblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!xfs_has_crc(bp->b_mount))
		return;
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}

bool
xfs_btree_fsblock_verify_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_mount	*mp = bp->b_mount;

	if (xfs_has_crc(mp)) {
		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
			return false;
		return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
	}

	return true;
}

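#if 0
/*
 * Illustration only: a long-format btree read verifier typically calls
 * the CRC helper above before any structural checks; a sketch, where
 * example_verify_struct() stands in for a hypothetical per-btree check:
 */
static void example_btree_read_verify(struct xfs_buf *bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_fsblock_verify_crc(bp)) {
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
		return;
	}
	fa = example_verify_struct(bp);	/* hypothetical helper */
	if (fa)
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
#endif
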
/*
 * Calculate CRC on the whole btree block and stuff it into the
 * short-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
 * it into the buffer so recovery knows what the last modification was that made
 * it to disk.
 */
void
xfs_btree_agblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!xfs_has_crc(bp->b_mount))
		return;
	if (bip)
		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}

bool
xfs_btree_agblock_verify_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_mount	*mp = bp->b_mount;

	if (xfs_has_crc(mp)) {
		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
			return false;
		return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
	}

	return true;
}

static int
xfs_btree_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	int			error;

	trace_xfs_btree_free_block(cur, bp);

	/*
	 * Don't allow block freeing for a staging cursor, because staging
	 * cursors do not support regular btree modifications.
	 */
	if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = cur->bc_ops->free_block(cur, bp);
	if (!error) {
		xfs_trans_binval(cur->bc_tp, bp);
		XFS_BTREE_STATS_INC(cur, free);
	}
	return error;
}

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			error)		/* del because of error */
{
	int			i;		/* btree level */

	/*
	 * Clear the buffer pointers and release the buffers. If we're doing
	 * this because of an error, inspect all of the entries in the bc_bufs
	 * array for buffers to be unlocked. This is because some of the btree
	 * code works from level n down to 0, and if we get an error along the
	 * way we won't have initialized all the entries down to 0.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_levels[i].bp)
			xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp);
		else if (!error)
			break;
	}

	/*
	 * If we are doing a BMBT update, the number of unaccounted blocks
	 * allocated during this cursor lifetime should be zero. If it's not
	 * zero, then we should be shut down or on our way to shutdown due to
	 * cancelling a dirty transaction on error.
	 */
	ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 ||
	       xfs_is_shutdown(cur->bc_mp) || error != 0);

	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_AG:
		if (cur->bc_ag.pag)
			xfs_perag_put(cur->bc_ag.pag);
		break;
	case XFS_BTREE_TYPE_INODE:
		/* nothing to do */
		break;
	case XFS_BTREE_TYPE_MEM:
		if (cur->bc_mem.pag)
			xfs_perag_put(cur->bc_mem.pag);
		break;
	}

	kmem_cache_free(cur->bc_cache, cur);
}

/* Return the buffer target for this btree's buffer. */
static inline struct xfs_buftarg *
xfs_btree_buftarg(
	struct xfs_btree_cur	*cur)
{
	if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
		return cur->bc_mem.xfbtree->target;
	return cur->bc_mp->m_ddev_targp;
}

/* Return the block size (in units of 512b sectors) for this btree. */
static inline unsigned int
xfs_btree_bbsize(
	struct xfs_btree_cur	*cur)
{
	if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
		return XFBNO_BBSIZE;
	return cur->bc_mp->m_bsize;
}

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int						/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur	*cur,		/* input cursor */
	struct xfs_btree_cur	**ncur)		/* output cursor */
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_trans	*tp = cur->bc_tp;
	struct xfs_buf		*bp;
	struct xfs_btree_cur	*new;
	int			error;
	int			i;

	/*
	 * Don't allow staging cursors to be duplicated because they're supposed
	 * to be kept private to a single thread.
	 */
	if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level current, re-get the buffer and copy the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_levels[i].ptr = cur->bc_levels[i].ptr;
		new->bc_levels[i].ra = cur->bc_levels[i].ra;
		bp = cur->bc_levels[i].bp;
		if (bp) {
			error = xfs_trans_read_buf(mp, tp,
					xfs_btree_buftarg(cur),
					xfs_buf_daddr(bp),
					xfs_btree_bbsize(cur), 0, &bp,
					cur->bc_ops->buf_ops);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(new);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_levels[i].bp = bp;
	}
	*ncur = new;
	return 0;
}

/*
 * XFS btree block layout and addressing:
 *
 * There are two types of blocks in the btree: leaf and non-leaf blocks.
 *
 * A leaf block starts with a header, followed by records containing
 * the values. A non-leaf block also starts with the same header, and
 * then first contains lookup keys followed by an equal number of pointers
 * to the btree blocks at the previous level.
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Leaf:	| header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Non-Leaf:	| header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 * The header is called struct xfs_btree_block for reasons better left unknown
 * and comes in different versions for short (32bit) and long (64bit) block
 * pointers. The record and key structures are defined by the btree instances
 * and opaque to the btree core. The block pointers are simple disk endian
 * integers, available in a short (32bit) and long (64bit) variant.
 *
 * The helpers below calculate the offset of a given record, key or pointer
 * into a btree block (xfs_btree_*_offset) or return a pointer to the given
 * record, key or pointer (xfs_btree_*_addr). Note that all addressing
 * inside the btree block is done using indices starting at one, not zero!
 *
 * If XFS_BTGEO_OVERLAPPING is set, then this btree supports keys containing
 * overlapping intervals. In such a tree, records are still sorted lowest to
 * highest and indexed by the smallest key value that refers to the record.
 * However, nodes are different: each pointer has two associated keys -- one
 * indexing the lowest key available in the block(s) below (the same behavior
 * as the key in a regular btree) and another indexing the highest key
 * available in the block(s) below. Because records are /not/ sorted by the
 * highest key, all leaf block updates require us to compute the highest key
 * that matches any record in the leaf and to recursively update the high keys
 * in the nodes going further up in the tree, if necessary. Nodes look like
 * this:
 *
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 * Non-Leaf:	| header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 *
 * To perform an interval query on an overlapped tree, perform the usual
 * depth-first search and use the low and high keys to decide if we can skip
 * that particular node. If a leaf node is reached, return the records that
 * intersect the interval. Note that an interval query may return numerous
 * entries. For a non-overlapped tree, simply search for the record associated
 * with the lowest key and iterate forward until a non-matching record is
 * found. Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by
 * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discusses this in
 * more detail.
 *
 * Why do we care about overlapping intervals? Let's say you have a bunch of
 * reverse mapping records on a reflink filesystem:
 *
 * 1: +- file A startblock B offset C length D -----------+
 * 2:      +- file E startblock F offset G length H --------------+
 * 3:      +- file I startblock F offset J length K --+
 * 4:                                                        +- file L... --+
 *
 * Now say we want to map block (B+D) into file A at offset (C+D). Ideally,
 * we'd simply increment the length of record 1. But how do we find the record
 * that ends at (B+D-1) (i.e. record 1)? A LE lookup of (B+D-1) would return
 * record 3 because the keys are ordered first by startblock. An interval
 * query would return records 1 and 2 because they both overlap (B+D-1), and
 * from that we can pick out record 1 as the appropriate left neighbor.
 *
 * In the non-overlapped case you can do a LE lookup and decrement the cursor
 * because a record's interval must end before the next record.
 */

/*
 * Return size of the btree block header for this btree instance.
 */
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		if (xfs_has_crc(cur->bc_mp))
			return XFS_BTREE_LBLOCK_CRC_LEN;
		return XFS_BTREE_LBLOCK_LEN;
	}
	if (xfs_has_crc(cur->bc_mp))
		return XFS_BTREE_SBLOCK_CRC_LEN;
	return XFS_BTREE_SBLOCK_LEN;
}

/*
 * Calculate offset of the n-th record in a btree block.
 */
STATIC size_t
xfs_btree_rec_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->rec_len;
}

/*
 * Calculate offset of the n-th key in a btree block.
 */
STATIC size_t
xfs_btree_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len;
}

/*
 * Calculate offset of the n-th high key in a btree block.
 */
STATIC size_t
xfs_btree_high_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
}

/*
 * Calculate offset of the n-th block pointer in a btree block.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{
	return xfs_btree_block_len(cur) +
		cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
		(n - 1) * cur->bc_ops->ptr_len;
}

/*
 * Return a pointer to the n-th record in the btree block.
 */
union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_rec *)
		((char *)block + xfs_btree_rec_offset(cur, n));
}

/*
 * Return a pointer to the n-th key in the btree block.
 */
union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_key *)
		((char *)block + xfs_btree_key_offset(cur, n));
}

/*
 * Return a pointer to the n-th high key in the btree block.
 */
union xfs_btree_key *
xfs_btree_high_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_key *)
		((char *)block + xfs_btree_high_key_offset(cur, n));
}

/*
 * Return a pointer to the n-th block pointer in the btree block.
 */
union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	int			level = xfs_btree_get_level(block);

	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}

struct xfs_ifork *
xfs_btree_ifork_ptr(
	struct xfs_btree_cur	*cur)
{
	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);

	if (cur->bc_flags & XFS_BTREE_STAGING)
		return cur->bc_ino.ifake->if_fork;
	return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork);
}

/*
 * Get the root block which is stored in the inode.
 *
 * For now this btree implementation assumes the btree root is always
 * stored in the if_broot field of an inode fork.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{
	struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

	return (struct xfs_btree_block *)ifp->if_broot;
}

/*
 * Retrieve the block pointer from the cursor at the given level.
 * This may be an inode btree root or from a buffer.
 */
struct xfs_btree_block *		/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{
	if (xfs_btree_at_iroot(cur, level)) {
		*bpp = NULL;
		return xfs_btree_get_iroot(cur);
	}

	*bpp = cur->bc_levels[level].bp;
	return XFS_BUF_TO_BLOCK(*bpp);
}

/*
 * Change the cursor to point to the first record at the given level.
 * Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	struct xfs_buf		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to 1, that's the first record/key.
	 */
	cur->bc_levels[level].ptr = 1;
	return 1;
}

/*
 * Change the cursor to point to the last record in the current block
 * at the given level. Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	struct xfs_buf		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to numrecs, that's the last record/key.
	 */
	cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs);
	return 1;
}

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	uint32_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest bit, so the first byte offset.
	 */
	for (i = 0, imask = 1u; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest bit, so the last byte offset.
	 */
	for (i = nbits - 1, imask = 1u << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}

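#if 0
/*
 * Illustration only: logging just the numrecs field of a short-form
 * block. XFS_BB_NUMRECS is bit 2, so with a table like the soffsets
 * array in xfs_btree_log_block() below, this yields first/last byte
 * offsets covering only bb_numrecs:
 */
	int	first, last;

	xfs_btree_offsets(XFS_BB_NUMRECS, soffsets, XFS_BB_NUM_BITS,
			&first, &last);
#endif
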
STATIC int
xfs_btree_readahead_fsblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
	int			rval = 0;

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, left),
				mp->m_bsize, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, right),
				mp->m_bsize, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

STATIC int
xfs_btree_readahead_memblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	struct xfs_buftarg	*btp = cur->bc_mem.xfbtree->target;
	xfbno_t			left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfbno_t			right = be64_to_cpu(block->bb_u.l.bb_rightsib);
	int			rval = 0;

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_buf_readahead(btp, xfbno_to_daddr(left), XFBNO_BBSIZE,
				cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_buf_readahead(btp, xfbno_to_daddr(right), XFBNO_BBSIZE,
				cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

STATIC int
xfs_btree_readahead_agblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agnumber_t		agno = cur->bc_ag.pag->pag_agno;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);
	int			rval = 0;

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_buf_readahead(mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(mp, agno, left),
				mp->m_bsize, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_buf_readahead(mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(mp, agno, right),
				mp->m_bsize, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if (xfs_btree_at_iroot(cur, lev))
		return 0;

	if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra)
		return 0;

	cur->bc_levels[lev].ra |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp);

	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_AG:
		return xfs_btree_readahead_agblock(cur, lr, block);
	case XFS_BTREE_TYPE_INODE:
		return xfs_btree_readahead_fsblock(cur, lr, block);
	case XFS_BTREE_TYPE_MEM:
		return xfs_btree_readahead_memblock(cur, lr, block);
	default:
		ASSERT(0);
		return 0;
	}
}

STATIC int
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	xfs_daddr_t			*daddr)
{
	int				error;

	error = xfs_btree_check_ptr(cur, ptr, 0, 1);
	if (error)
		return error;

	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_AG:
		*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
				be32_to_cpu(ptr->s));
		break;
	case XFS_BTREE_TYPE_INODE:
		*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
		break;
	case XFS_BTREE_TYPE_MEM:
		*daddr = xfbno_to_daddr(be64_to_cpu(ptr->l));
		break;
	}
	return 0;
}

/*
 * Readahead @count btree blocks at the given @ptr location.
 *
 * We don't need to care about long or short form btrees here as we have a
 * method of converting the ptr directly to a daddr available to us.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{
	xfs_daddr_t		daddr;

	if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
		return;
	xfs_buf_readahead(xfs_btree_buftarg(cur), daddr,
			xfs_btree_bbsize(cur) * count,
			cur->bc_ops->buf_ops);
}

/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */
STATIC void
xfs_btree_setbuf(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	struct xfs_buf		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_levels[lev].bp)
		xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp);
	cur->bc_levels[lev].bp = bp;
	cur->bc_levels[lev].ra = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
	}
}

bool
xfs_btree_ptr_is_null(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return ptr->l == cpu_to_be64(NULLFSBLOCK);
	else
		return ptr->s == cpu_to_be32(NULLAGBLOCK);
}

void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		ptr->l = cpu_to_be64(NULLFSBLOCK);
	else
		ptr->s = cpu_to_be32(NULLAGBLOCK);
}

static inline bool
xfs_btree_ptrs_equal(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*ptr1,
	union xfs_btree_ptr		*ptr2)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return ptr1->l == ptr2->l;
	return ptr1->s == ptr2->s;
}

/*
 * Get/set/init sibling pointers
 */
void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->l = block->bb_u.l.bb_rightsib;
		else
			ptr->l = block->bb_u.l.bb_leftsib;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->s = block->bb_u.s.bb_rightsib;
		else
			ptr->s = block->bb_u.s.bb_leftsib;
	}
}

STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur		*cur,
	struct xfs_btree_block		*block,
	const union xfs_btree_ptr	*ptr,
	int				lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}

void
__xfs_btree_init_block(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	const struct xfs_btree_ops *ops,
	xfs_daddr_t		blkno,
	__u16			level,
	__u16			numrecs,
	__u64			owner)
{
	bool			crc = xfs_has_crc(mp);
	__u32			magic = xfs_btree_magic(mp, ops);

	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (crc) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (crc) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			/* owner is a 32 bit value on short blocks */
			buf->bb_u.s.bb_owner = cpu_to_be32((__u32)owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}

void
xfs_btree_init_block(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	const struct xfs_btree_ops *ops,
	__u16			level,
	__u16			numrecs,
	__u64			owner)
{
	__xfs_btree_init_block(mp, block, ops, XFS_BUF_DADDR_NULL, level,
			numrecs, owner);
}

void
xfs_btree_init_buf(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	const struct xfs_btree_ops	*ops,
	__u16				level,
	__u16				numrecs,
	__u64				owner)
{
	__xfs_btree_init_block(mp, XFS_BUF_TO_BLOCK(bp), ops,
			xfs_buf_daddr(bp), level, numrecs, owner);
	bp->b_ops = ops->buf_ops;
}

static inline __u64
xfs_btree_owner(
	struct xfs_btree_cur	*cur)
{
	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_MEM:
		return cur->bc_mem.xfbtree->owner;
	case XFS_BTREE_TYPE_INODE:
		return cur->bc_ino.ip->i_ino;
	case XFS_BTREE_TYPE_AG:
		return cur->bc_ag.pag->pag_agno;
	default:
		ASSERT(0);
		return 0;
	}
}

void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs,
			xfs_btree_owner(cur));
}

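#if 0
/*
 * Illustration only: a freshly allocated btree buffer is stamped as an
 * empty leaf (level 0, no records); the owner and magic are derived
 * from the cursor via xfs_btree_owner() and xfs_btree_magic():
 */
	xfs_btree_init_block_cur(cur, bp, 0, 0);
#endif
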
/*
 * Return true if ptr is the last record in the btree and
 * we need to track updates to this record. The decision
 * will be further refined in the update_lastrec method.
 */
STATIC int
xfs_btree_is_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level)
{
	union xfs_btree_ptr	ptr;

	if (level > 0)
		return 0;
	if (!(cur->bc_ops->geom_flags & XFS_BTGEO_LASTREC_UPDATE))
		return 0;

	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &ptr))
		return 0;
	return 1;
}

STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{
	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_AG:
		ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
					xfs_buf_daddr(bp)));
		break;
	case XFS_BTREE_TYPE_INODE:
		ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
					xfs_buf_daddr(bp)));
		break;
	case XFS_BTREE_TYPE_MEM:
		ptr->l = cpu_to_be64(xfs_daddr_to_xfbno(xfs_buf_daddr(bp)));
		break;
	}
}

static inline void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	xfs_buf_set_ref(bp, cur->bc_ops->lru_refs);
}

int
xfs_btree_get_buf_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	struct xfs_btree_block		**block,
	struct xfs_buf			**bpp)
{
	xfs_daddr_t			d;
	int				error;

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d,
			xfs_btree_bbsize(cur), 0, bpp);
	if (error)
		return error;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 */
int
xfs_btree_read_buf_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				flags,
	struct xfs_btree_block		**block,
	struct xfs_buf			**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d,
			xfs_btree_bbsize(cur), flags, bpp,
			cur->bc_ops->buf_ops);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Copy keys from one btree block to another.
 */
void
xfs_btree_copy_keys(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*dst_key,
	const union xfs_btree_key	*src_key,
	int				numkeys)
{
	ASSERT(numkeys >= 0);
	memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
}

/*
 * Copy records from one btree block to another.
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{
	ASSERT(numrecs >= 0);
	memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Copy block pointers from one btree block to another.
 */
void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*dst_ptr,
	const union xfs_btree_ptr	*src_ptr,
	int				numptrs)
{
	ASSERT(numptrs >= 0);
	memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len);
}

/*
 * Shift keys one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{
	char			*dst_key;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_key = (char *)key + (dir * cur->bc_ops->key_len);
	memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}

/*
 * Shift records one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,
	int			numrecs)
{
	char			*dst_rec;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
	memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Shift block pointers one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,
	int			numptrs)
{
	char			*dst_ptr;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len);
	memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len);
}

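#if 0
/*
 * Illustration only: to open a hole at index "ptr" of a leaf holding
 * "numrecs" records before an insert, shift everything from ptr one
 * slot to the right (dir == 1); deletion shifts back with dir == -1:
 */
	xfs_btree_shift_recs(cur, xfs_btree_rec_addr(cur, ptr, block), 1,
			numrecs - ptr + 1);
#endif
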
/*
 * Log key values from the btree block.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
				xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}
}

/*
 * Log record values from the btree block.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{
	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
			xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}
}

/*
 * Log fields from a btree block header.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	uint32_t		fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	if (bp) {
		int	nbits;

		if (xfs_has_crc(cur->bc_mp)) {
			/*
			 * We don't log the CRC when updating a btree
			 * block but instead recreate it during log
			 * recovery. As the log buffers have checksums
			 * of their own this is safe and avoids logging a crc
			 * update in a lot of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
			xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}
}

/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
			goto out0;
		ASSERT(0);
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_levels[lev].ptr = 1;
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}

/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_levels[level].ptr > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_levels[lev].ptr > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are either seriously confused
	 * or the root of the tree is in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
			goto out0;
		ASSERT(0);
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block);
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}

/*
 * Check the btree block owner now that we have the context to know who the
 * real owner is.
 */
static inline xfs_failaddr_t
xfs_btree_check_block_owner(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block)
{
	__u64			owner;

	if (!xfs_has_crc(cur->bc_mp) ||
	    (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER))
		return NULL;

	owner = xfs_btree_owner(cur);
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
		if (be64_to_cpu(block->bb_u.l.bb_owner) != owner)
			return __this_address;
	} else {
		if (be32_to_cpu(block->bb_u.s.bb_owner) != owner)
			return __this_address;
	}

	return NULL;
}

int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	int				level,	/* level in the btree */
	const union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block		**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	xfs_daddr_t		daddr;
	int			error;

	/* special case the root block if in an inode */
	if (xfs_btree_at_iroot(cur, level)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level is for the disk address we are
	 * looking for, re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_levels[level].bp;
	error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
	if (error)
		return error;
	if (bp && xfs_buf_daddr(bp) == daddr) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	/* Check the inode owner since the verifiers don't. */
	if (xfs_btree_check_block_owner(cur, *blkp) != NULL)
		goto out_bad;

	/* Did we get the level we were looking for? */
	if (be16_to_cpu((*blkp)->bb_level) != level)
		goto out_bad;

	/* Check that internal nodes have at least one record. */
	if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
		goto out_bad;

	xfs_btree_setbuf(cur, level, bp);
	return 0;

out_bad:
	*blkp = NULL;
	xfs_buf_mark_corrupt(bp);
	xfs_trans_brelse(cur->bc_tp, bp);
	xfs_btree_mark_sick(cur);
	return -EFSCORRUPTED;
}

/*
 * Get current search key. For level 0 we don't actually have a key
 * structure so we make one up from the record. For all other levels
 * we just return the right key.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)
{
	if (level == 0) {
		cur->bc_ops->init_key_from_rec(kp,
				xfs_btree_rec_addr(cur, keyno, block));
		return kp;
	}

	return xfs_btree_key_addr(cur, keyno, block);
}

/*
 * Initialize a pointer to the root block.
 */
void
xfs_btree_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
		/*
		 * Inode-rooted btrees call xfs_btree_get_iroot to find the root
		 * in xfs_btree_lookup_get_block and don't need a pointer here.
		 */
		ptr->l = 0;
	} else if (cur->bc_flags & XFS_BTREE_STAGING) {
		ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root);
	} else {
		cur->bc_ops->init_ptr_from_cur(cur, ptr);
	}
}

/*
 * Lookup the record. The cursor is made to point to it, based on dir.
 * stat is set to 0 if we can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	int64_t			diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	xfs_btree_init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				if (level != 0 || cur->bc_nlevels != 1) {
					XFS_CORRUPTION_ERROR(__func__,
							XFS_ERRLEVEL_LOW,
							cur->bc_mp, block,
							sizeof(*block));
					xfs_btree_mark_sick(cur);
					return -EFSCORRUPTED;
				}

				cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE;
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

			error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;

			cur->bc_levels[level].ptr = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_levels[0].ptr = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
				xfs_btree_mark_sick(cur);
				return -EFSCORRUPTED;
			}
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_levels[0].ptr = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	return 0;

error0:
	return error;
}

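#if 0
/*
 * Illustration only: the classic full-tree record walk built on the
 * helpers above -- position at the first candidate record, then step
 * right until lookup/increment report no further records:
 */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &stat);
	while (!error && stat) {
		/* ... process the record at cur->bc_levels[0].ptr ... */
		error = xfs_btree_increment(cur, 0, &stat);
	}
#endif
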
2171 /* Find the high key storage area from a regular key. */
2172 union xfs_btree_key *
2173 xfs_btree_high_key_from_key(
2174 struct xfs_btree_cur *cur,
2175 union xfs_btree_key *key)
2177 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
2178 return (union xfs_btree_key *)((char *)key +
2179 (cur->bc_ops->key_len / 2));
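#if 0
/*
 * Illustration only: in an overlapping btree, bc_ops->key_len covers a
 * (low, high) key pair stored back to back, so the high half begins at
 * key_len / 2. The rmapbt, for instance, stores two struct xfs_rmap_key
 * side by side, and this helper returns a pointer to the second one:
 */
	union xfs_btree_key	*hi = xfs_btree_high_key_from_key(cur, key);
#endif
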
/* Determine the low (and high if overlapped) keys of a leaf block */
STATIC void
xfs_btree_get_leaf_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	max_hkey;
	union xfs_btree_key	hkey;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	*high;
	int			n;

	rec = xfs_btree_rec_addr(cur, 1, block);
	cur->bc_ops->init_key_from_rec(key, rec);

	if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
		cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			rec = xfs_btree_rec_addr(cur, n, block);
			cur->bc_ops->init_high_key_from_rec(&hkey, rec);
			if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey))
				max_hkey = hkey;
		}

		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
	}
}

/* Determine the low (and high if overlapped) keys of a node block */
STATIC void
xfs_btree_get_node_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*max_hkey;
	union xfs_btree_key	*high;
	int			n;

	if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len / 2);

		max_hkey = xfs_btree_high_key_addr(cur, 1, block);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			hkey = xfs_btree_high_key_addr(cur, n, block);
			if (xfs_btree_keycmp_gt(cur, hkey, max_hkey))
				max_hkey = hkey;
		}

		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
	} else {
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len);
	}
}

/* Derive the keys for any btree block. */
void
xfs_btree_get_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	if (be16_to_cpu(block->bb_level) == 0)
		xfs_btree_get_leaf_keys(cur, block, key);
	else
		xfs_btree_get_node_keys(cur, block, key);
}

/*
 * Decide if we need to update the parent keys of a btree block. For
 * a standard btree this is only necessary if we're updating the first
 * record/key. For an overlapping btree, we must always update the
 * keys because the highest key can be in any of the records or keys
 * in the block.
 */
static inline bool
xfs_btree_needs_key_update(
	struct xfs_btree_cur	*cur,
	int			ptr)
{
	return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1;
}

/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root. If force_all is false, stop if the keys for a given
 * level do not need updating.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* keys from current level */
	union xfs_btree_key	*lkey;	/* keys from the next level up */
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*nlkey;	/* keys from the next level up */
	union xfs_btree_key	*nhkey;
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);

	/* Exit if there aren't any parent levels to update. */
	if (level + 1 >= cur->bc_nlevels)
		return 0;

	trace_xfs_btree_updkeys(cur, level, bp0);

	lkey = &key;
	hkey = xfs_btree_high_key_from_key(cur, lkey);
	xfs_btree_get_keys(cur, block, lkey);
	for (level++; level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
		trace_xfs_btree_updkeys(cur, level, bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_levels[level].ptr;
		nlkey = xfs_btree_key_addr(cur, ptr, block);
		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
		if (!force_all &&
		    xfs_btree_keycmp_eq(cur, nlkey, lkey) &&
		    xfs_btree_keycmp_eq(cur, nhkey, hkey))
			break;
		xfs_btree_copy_keys(cur, nlkey, lkey, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
		if (level + 1 >= cur->bc_nlevels)
			break;
		xfs_btree_get_node_keys(cur, block, lkey);
	}

	return 0;
}

/* Update all the keys from some level in cursor back to the root. */
STATIC int
xfs_btree_updkeys_force(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_buf		*bp;
	struct xfs_btree_block	*block;

	block = xfs_btree_get_block(cur, level, &bp);
	return __xfs_btree_updkeys(cur, level, block, bp, true);
}

/*
 * Update the parent keys of the given level, progressing towards the root.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	union xfs_btree_key	key;
	int			ptr;

	ASSERT(level >= 0);

	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)
		return __xfs_btree_updkeys(cur, level, block, bp, false);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	xfs_btree_get_keys(cur, block, &key);
	for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_levels[level].ptr;
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, &key, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	return 0;
}

/*
 * Update the record referred to by cur to the value in the
 * given record. This either works (return 0) or gets an
 * EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif
	/* Get the address of the rec to be updated. */
	ptr = cur->bc_levels[0].ptr;
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Pass new key value up to our parent. */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, 0);
		if (error)
			goto error0;
	}

	return 0;

error0:
	return error;
}

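#if 0
/*
 * Illustration only: the usual modify sequence -- find the record with
 * an exact-match lookup, change the in-memory copy, then write it back
 * through the cursor:
 */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (!error && stat) {
		/* ... modify "rec" as needed ... */
		error = xfs_btree_update(cur, &rec);
	}
#endif
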
2446 * Move 1 record left from cur/level if possible.
2447 * Update cur to reflect the new path.
2449 STATIC int /* error */
2451 struct xfs_btree_cur *cur,
2453 int *stat) /* success/failure */
2455 struct xfs_buf *lbp; /* left buffer pointer */
2456 struct xfs_btree_block *left; /* left btree block */
2457 int lrecs; /* left record count */
2458 struct xfs_buf *rbp; /* right buffer pointer */
2459 struct xfs_btree_block *right; /* right btree block */
2460 struct xfs_btree_cur *tcur; /* temporary btree cursor */
2461 int rrecs; /* right record count */
2462 union xfs_btree_ptr lptr; /* left btree pointer */
2463 union xfs_btree_key *rkp = NULL; /* right btree key */
2464 union xfs_btree_ptr *rpp = NULL; /* right address pointer */
2465 union xfs_btree_rec *rrp = NULL; /* right record pointer */
2466 int error; /* error return value */
2469 if (xfs_btree_at_iroot(cur, level))
2472 /* Set up variables for this block as "right". */
2473 right = xfs_btree_get_block(cur, level, &rbp);
2476 error = xfs_btree_check_block(cur, right, level, rbp);
2481 /* If we've got no left sibling then we can't shift an entry left. */
2482 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2483 if (xfs_btree_ptr_is_null(cur, &lptr))
2487 * If the cursor entry is the one that would be moved, don't
2488 * do it... it's too complicated.
2490 if (cur->bc_levels[level].ptr <= 1)
2493 /* Set up the left neighbor as "left". */
2494 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
2498 /* If it's full, it can't take another entry. */
2499 lrecs = xfs_btree_get_numrecs(left);
2500 if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
2503 rrecs = xfs_btree_get_numrecs(right);
2506 * We add one entry to the left side and remove one for the right side.
2507 * Account for it here; the changes will be updated on disk and logged later.
2513 XFS_BTREE_STATS_INC(cur, lshift);
2514 XFS_BTREE_STATS_ADD(cur, moves, 1);
2517 * If non-leaf, copy a key and a ptr to the left block.
2518 * Log the changes to the left block.
2521 /* It's a non-leaf. Move keys and pointers. */
2522 union xfs_btree_key *lkp; /* left btree key */
2523 union xfs_btree_ptr *lpp; /* left address pointer */
2525 lkp = xfs_btree_key_addr(cur, lrecs, left);
2526 rkp = xfs_btree_key_addr(cur, 1, right);
2528 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2529 rpp = xfs_btree_ptr_addr(cur, 1, right);
2531 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
2535 xfs_btree_copy_keys(cur, lkp, rkp, 1);
2536 xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
2538 xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
2539 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
2541 ASSERT(cur->bc_ops->keys_inorder(cur,
2542 xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
2544 /* It's a leaf. Move records. */
2545 union xfs_btree_rec *lrp; /* left record pointer */
2547 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2548 rrp = xfs_btree_rec_addr(cur, 1, right);
2550 xfs_btree_copy_recs(cur, lrp, rrp, 1);
2551 xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
2553 ASSERT(cur->bc_ops->recs_inorder(cur,
2554 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
2557 xfs_btree_set_numrecs(left, lrecs);
2558 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2560 xfs_btree_set_numrecs(right, rrecs);
2561 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2564 * Slide the contents of right down one entry.
2566 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
2568 /* It's a nonleaf. Operate on keys and ptrs. */
2569 for (i = 0; i < rrecs; i++) {
2570 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
2575 xfs_btree_shift_keys(cur,
2576 xfs_btree_key_addr(cur, 2, right),
2578 xfs_btree_shift_ptrs(cur,
2579 xfs_btree_ptr_addr(cur, 2, right),
2582 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2583 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2585 /* It's a leaf. Operate on records. */
2586 xfs_btree_shift_recs(cur,
2587 xfs_btree_rec_addr(cur, 2, right),
2589 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2593 * Using a temporary cursor, update the parent key values of the
2594 * block on the left.
2596 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2597 error = xfs_btree_dup_cursor(cur, &tcur);
2600 i = xfs_btree_firstrec(tcur, level);
2601 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2602 xfs_btree_mark_sick(cur);
2603 error = -EFSCORRUPTED;
2607 error = xfs_btree_decrement(tcur, level, &i);
2611 /* Update the parent high keys of the left block, if needed. */
2612 error = xfs_btree_update_keys(tcur, level);
2616 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2619 /* Update the parent keys of the right block. */
2620 error = xfs_btree_update_keys(cur, level);
2624 /* Slide the cursor value left one. */
2625 cur->bc_levels[level].ptr--;
2638 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2643 * Move 1 record right from cur/level if possible.
2644 * Update cur to reflect the new path.
2646 STATIC int /* error */
2648 struct xfs_btree_cur *cur,
2650 int *stat) /* success/failure */
2652 struct xfs_buf *lbp; /* left buffer pointer */
2653 struct xfs_btree_block *left; /* left btree block */
2654 struct xfs_buf *rbp; /* right buffer pointer */
2655 struct xfs_btree_block *right; /* right btree block */
2656 struct xfs_btree_cur *tcur; /* temporary btree cursor */
2657 union xfs_btree_ptr rptr; /* right block pointer */
2658 union xfs_btree_key *rkp; /* right btree key */
2659 int rrecs; /* right record count */
2660 int lrecs; /* left record count */
2661 int error; /* error return value */
2662 int i; /* loop counter */
2664 if (xfs_btree_at_iroot(cur, level))
2667 /* Set up variables for this block as "left". */
2668 left = xfs_btree_get_block(cur, level, &lbp);
2671 error = xfs_btree_check_block(cur, left, level, lbp);
2676 /* If we've got no right sibling then we can't shift an entry right. */
2677 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2678 if (xfs_btree_ptr_is_null(cur, &rptr))
2682 * If the cursor entry is the one that would be moved, don't
2683 * do it... it's too complicated.
2685 lrecs = xfs_btree_get_numrecs(left);
2686 if (cur->bc_levels[level].ptr >= lrecs)
2689 /* Set up the right neighbor as "right". */
2690 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
2694 /* If it's full, it can't take another entry. */
2695 rrecs = xfs_btree_get_numrecs(right);
2696 if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
2699 XFS_BTREE_STATS_INC(cur, rshift);
2700 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2703 * Make a hole at the start of the right neighbor block, then
2704 * copy the last left block entry to the hole.
2707 /* It's a nonleaf. Make a hole in the keys and ptrs. */
2708 union xfs_btree_key *lkp;
2709 union xfs_btree_ptr *lpp;
2710 union xfs_btree_ptr *rpp;
2712 lkp = xfs_btree_key_addr(cur, lrecs, left);
2713 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2714 rkp = xfs_btree_key_addr(cur, 1, right);
2715 rpp = xfs_btree_ptr_addr(cur, 1, right);
2717 for (i = rrecs - 1; i >= 0; i--) {
2718 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
2723 xfs_btree_shift_keys(cur, rkp, 1, rrecs);
2724 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
2726 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
2730 /* Now put the new data in, and log it. */
2731 xfs_btree_copy_keys(cur, rkp, lkp, 1);
2732 xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
2734 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
2735 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
2737 ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
2738 xfs_btree_key_addr(cur, 2, right)));
2740 /* It's a leaf. Make a hole in the records. */
2741 union xfs_btree_rec *lrp;
2742 union xfs_btree_rec *rrp;
2744 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2745 rrp = xfs_btree_rec_addr(cur, 1, right);
2747 xfs_btree_shift_recs(cur, rrp, 1, rrecs);
2749 /* Now put the new data in, and log it. */
2750 xfs_btree_copy_recs(cur, rrp, lrp, 1);
2751 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
2755 * Decrement and log left's numrecs, bump and log right's numrecs.
2757 xfs_btree_set_numrecs(left, --lrecs);
2758 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2760 xfs_btree_set_numrecs(right, ++rrecs);
2761 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2764 * Using a temporary cursor, update the parent key values of the
2765 * block on the right.
2767 error = xfs_btree_dup_cursor(cur, &tcur);
2770 i = xfs_btree_lastrec(tcur, level);
2771 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2772 xfs_btree_mark_sick(cur);
2773 error = -EFSCORRUPTED;
2777 error = xfs_btree_increment(tcur, level, &i);
2781 /* Update the parent high keys of the left block, if needed. */
2782 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2783 error = xfs_btree_update_keys(cur, level);
2788 /* Update the parent keys of the right block. */
2789 error = xfs_btree_update_keys(tcur, level);
2793 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2806 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2811 xfs_btree_alloc_block(
2812 struct xfs_btree_cur *cur,
2813 const union xfs_btree_ptr *hint_block,
2814 union xfs_btree_ptr *new_block,
2820 * Don't allow block allocation for a staging cursor, because staging
2821 * cursors do not support regular btree modifications.
2823 * Bulk loading uses a separate callback to obtain new blocks from a
2824 * preallocated list, which prevents ENOSPC failures during loading.
2826 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
2828 return -EFSCORRUPTED;
2831 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
2832 trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
2837 * Split cur/level block in half.
2838 * Return new block number and the key to its first
2839 * record (to be inserted into parent).
2841 STATIC int /* error */
2843 struct xfs_btree_cur *cur,
2845 union xfs_btree_ptr *ptrp,
2846 union xfs_btree_key *key,
2847 struct xfs_btree_cur **curp,
2848 int *stat) /* success/failure */
2850 union xfs_btree_ptr lptr; /* left sibling block ptr */
2851 struct xfs_buf *lbp; /* left buffer pointer */
2852 struct xfs_btree_block *left; /* left btree block */
2853 union xfs_btree_ptr rptr; /* right sibling block ptr */
2854 struct xfs_buf *rbp; /* right buffer pointer */
2855 struct xfs_btree_block *right; /* right btree block */
2856 union xfs_btree_ptr rrptr; /* right-right sibling ptr */
2857 struct xfs_buf *rrbp; /* right-right buffer pointer */
2858 struct xfs_btree_block *rrblock; /* right-right btree block */
2862 int error; /* error return value */
2865 XFS_BTREE_STATS_INC(cur, split);
2867 /* Set up left block (current one). */
2868 left = xfs_btree_get_block(cur, level, &lbp);
2871 error = xfs_btree_check_block(cur, left, level, lbp);
2876 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
2878 /* Allocate the new block. If we can't do it, we're toast. Give up. */
2879 error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat);
2884 XFS_BTREE_STATS_INC(cur, alloc);
2886 /* Set up the new block as "right". */
2887 error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp);
2891 /* Fill in the btree header for the new right block. */
2892 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
2895 * Split the entries between the old and the new block evenly.
2896 * Make sure that if there's an odd number of entries now, each
2897 * new block will have the same number of entries.
2899 lrecs = xfs_btree_get_numrecs(left);
2901 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1)
2903 src_index = (lrecs - rrecs + 1);
2905 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2907 /* Adjust numrecs for the later get_*_keys() calls. */
2909 xfs_btree_set_numrecs(left, lrecs);
2910 xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
2913 * Copy btree block entries from the left block over to the
2914 * new block, the right. Update the right block and log the
2918 /* It's a non-leaf. Move keys and pointers. */
2919 union xfs_btree_key *lkp; /* left btree key */
2920 union xfs_btree_ptr *lpp; /* left address pointer */
2921 union xfs_btree_key *rkp; /* right btree key */
2922 union xfs_btree_ptr *rpp; /* right address pointer */
2924 lkp = xfs_btree_key_addr(cur, src_index, left);
2925 lpp = xfs_btree_ptr_addr(cur, src_index, left);
2926 rkp = xfs_btree_key_addr(cur, 1, right);
2927 rpp = xfs_btree_ptr_addr(cur, 1, right);
2929 for (i = src_index; i < rrecs; i++) {
2930 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
2935 /* Copy the keys & pointers to the new block. */
2936 xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
2937 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
2939 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2940 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2942 /* Stash the keys of the new block for later insertion. */
2943 xfs_btree_get_node_keys(cur, right, key);
2945 /* It's a leaf. Move records. */
2946 union xfs_btree_rec *lrp; /* left record pointer */
2947 union xfs_btree_rec *rrp; /* right record pointer */
2949 lrp = xfs_btree_rec_addr(cur, src_index, left);
2950 rrp = xfs_btree_rec_addr(cur, 1, right);
2952 /* Copy records to the new block. */
2953 xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
2954 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2956 /* Stash the keys of the new block for later insertion. */
2957 xfs_btree_get_leaf_keys(cur, right, key);
2961 * Find the left block number by looking in the buffer.
2962 * Adjust sibling pointers.
2964 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
2965 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
2966 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2967 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2969 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
2970 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
2973 * If there's a block to the new block's right, make that block
2974 * point back to right instead of to left.
2976 if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
2977 error = xfs_btree_read_buf_block(cur, &rrptr,
2978 0, &rrblock, &rrbp);
2981 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
2982 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
2985 /* Update the parent high keys of the left block, if needed. */
2986 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2987 error = xfs_btree_update_keys(cur, level);
2993 * If the cursor is really in the right block, move it there.
2994 * If it's just pointing past the last entry in left, then we'll
2995 * insert there, so don't change anything in that case.
2997 if (cur->bc_levels[level].ptr > lrecs + 1) {
2998 xfs_btree_setbuf(cur, level, rbp);
2999 cur->bc_levels[level].ptr -= lrecs;
3002 * If there are more levels, we'll need another cursor which refers to
3003 * the right block, no matter where this cursor was.
3005 if (level + 1 < cur->bc_nlevels) {
3006 error = xfs_btree_dup_cursor(cur, curp);
3009 (*curp)->bc_levels[level + 1].ptr++;
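/*
 * Worked example of the split sizing above (illustrative only, and
 * assuming the elided "rrecs = lrecs / 2" initialiser that precedes the
 * odd-count check): pick how many records move to the new right block
 * so both halves come out even once the pending insert at @ptr lands.
 * E.g. lrecs = 9, ptr = 3 gives rrecs = 5 (insert goes left: 4 + 1 vs
 * 5), while lrecs = 9, ptr = 8 gives rrecs = 4 (insert goes right:
 * 5 vs 4 + 1).
 */
static inline int
xfs_btree_split_rrecs_example(
	int	lrecs,	/* records currently in the block being split */
	int	ptr)	/* cursor index of the pending insert */
{
	int	rrecs = lrecs / 2;

	/* If lrecs is odd and the insert lands left, move one more right. */
	if ((lrecs & 1) && ptr <= rrecs + 1)
		rrecs++;
	return rrecs;
}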
3023 struct xfs_btree_split_args {
3024 struct xfs_btree_cur *cur;
3026 union xfs_btree_ptr *ptrp;
3027 union xfs_btree_key *key;
3028 struct xfs_btree_cur **curp;
3029 int *stat; /* success/failure */
3031 bool kswapd; /* allocation in kswapd context */
3032 struct completion *done;
3033 struct work_struct work;
3037 * Stack switching interfaces for allocation
3040 xfs_btree_split_worker(
3041 struct work_struct *work)
3043 struct xfs_btree_split_args *args = container_of(work,
3044 struct xfs_btree_split_args, work);
3045 unsigned long pflags;
3046 unsigned long new_pflags = 0;
3049 * We are in a transaction context here, but may also be doing work
3050 * in kswapd context, and hence we may need to inherit that state
3051 * temporarily to ensure that we don't block waiting for memory reclaim in any way.
3055 new_pflags |= PF_MEMALLOC | PF_KSWAPD;
3057 current_set_flags_nested(&pflags, new_pflags);
3058 xfs_trans_set_context(args->cur->bc_tp);
3060 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
3061 args->key, args->curp, args->stat);
3063 xfs_trans_clear_context(args->cur->bc_tp);
3064 current_restore_flags_nested(&pflags, new_pflags);
3067 * Do not access args after complete() has run here. We don't own args
3068 * and the owner may run and free args before we return here.
3070 complete(args->done);
3075 * BMBT split requests often come in with little stack to work on so we push
3076 * them off to a worker thread so there is lots of stack to use. For the other
3077 * btree types, just call directly to avoid the context switch overhead here.
3079 * Care must be taken here - the work queue rescuer thread introduces potential
3080 * AGF <> worker queue deadlocks if the BMBT block allocation has to lock new
3081 * AGFs to allocate blocks. A task being run by the rescuer could attempt to
3082 * lock an AGF that is already locked by a task queued to run by the rescuer,
3083 * resulting in an ABBA deadlock: the rescuer cannot run the lock holder to
3084 * release the lock until the task it is currently running acquires that lock.
3086 * To avoid this issue, we only ever queue BMBT splits that don't have an AGF
3087 * already locked to allocate from. The only place that doesn't hold an AGF
3088 * locked is unwritten extent conversion at IO completion, but that has already
3089 * been offloaded to a worker thread and hence has no stack consumption issues
3090 * we have to worry about.
3092 STATIC int /* error */
3094 struct xfs_btree_cur *cur,
3096 union xfs_btree_ptr *ptrp,
3097 union xfs_btree_key *key,
3098 struct xfs_btree_cur **curp,
3099 int *stat) /* success/failure */
3101 struct xfs_btree_split_args args;
3102 DECLARE_COMPLETION_ONSTACK(done);
3104 if (!xfs_btree_is_bmap(cur->bc_ops) ||
3105 cur->bc_tp->t_highest_agno == NULLAGNUMBER)
3106 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
3115 args.kswapd = current_is_kswapd();
3116 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
3117 queue_work(xfs_alloc_wq, &args.work);
3118 wait_for_completion(&done);
3119 destroy_work_on_stack(&args.work);
3123 #define xfs_btree_split __xfs_btree_split
3124 #endif /* __KERNEL__ */
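/*
 * The offload pattern above in miniature (illustrative sketch only;
 * "example_args", "example_worker" and "example_wq" are made-up names):
 * run a computation synchronously on a workqueue thread to borrow its
 * deep stack, parking the caller on an on-stack completion until the
 * worker signals it.
 */
#include <linux/completion.h>
#include <linux/workqueue.h>

struct example_args {
	int			input;
	int			result;
	struct completion	*done;
	struct work_struct	work;
};

static void
example_worker(
	struct work_struct	*work)
{
	struct example_args	*args = container_of(work,
					struct example_args, work);

	args->result = args->input * 2;	/* stand-in for the deep-stack work */
	complete(args->done);		/* do not touch args after this */
}

static int
example_offload(
	struct workqueue_struct	*example_wq,
	int			input)
{
	struct example_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	args.input = input;
	args.done = &done;
	INIT_WORK_ONSTACK(&args.work, example_worker);
	queue_work(example_wq, &args.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}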
3127 * Copy the old inode root contents into a real block and make the
3128 * broot point to it.
3131 xfs_btree_new_iroot(
3132 struct xfs_btree_cur *cur, /* btree cursor */
3133 int *logflags, /* logging flags for inode */
3134 int *stat) /* return status - 0 fail */
3136 struct xfs_buf *cbp; /* buffer for cblock */
3137 struct xfs_btree_block *block; /* btree block */
3138 struct xfs_btree_block *cblock; /* child btree block */
3139 union xfs_btree_key *ckp; /* child key pointer */
3140 union xfs_btree_ptr *cpp; /* child ptr pointer */
3141 union xfs_btree_key *kp; /* pointer to btree key */
3142 union xfs_btree_ptr *pp; /* pointer to block addr */
3143 union xfs_btree_ptr nptr; /* new block addr */
3144 int level; /* btree level */
3145 int error; /* error return code */
3146 int i; /* loop counter */
3148 XFS_BTREE_STATS_INC(cur, newroot);
3150 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
3152 level = cur->bc_nlevels - 1;
3154 block = xfs_btree_get_iroot(cur);
3155 pp = xfs_btree_ptr_addr(cur, 1, block);
3157 /* Allocate the new block. If we can't do it, we're toast. Give up. */
3158 error = xfs_btree_alloc_block(cur, pp, &nptr, stat);
3164 XFS_BTREE_STATS_INC(cur, alloc);
3166 /* Copy the root into a real block. */
3167 error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp);
3172 * We can't just memcpy() the root in for CRC enabled btree blocks. In
3173 * that case we also have to ensure the blkno remains correct.
3175 memcpy(cblock, block, xfs_btree_block_len(cur));
3176 if (xfs_has_crc(cur->bc_mp)) {
3177 __be64 bno = cpu_to_be64(xfs_buf_daddr(cbp));
3178 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
3179 cblock->bb_u.l.bb_blkno = bno;
3181 cblock->bb_u.s.bb_blkno = bno;
3184 be16_add_cpu(&block->bb_level, 1);
3185 xfs_btree_set_numrecs(block, 1);
3187 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
3188 cur->bc_levels[level + 1].ptr = 1;
3190 kp = xfs_btree_key_addr(cur, 1, block);
3191 ckp = xfs_btree_key_addr(cur, 1, cblock);
3192 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
3194 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3195 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
3196 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
3201 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
3203 error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
3207 xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
3209 xfs_iroot_realloc(cur->bc_ino.ip,
3210 1 - xfs_btree_get_numrecs(cblock),
3211 cur->bc_ino.whichfork);
3213 xfs_btree_setbuf(cur, level, cbp);
3216 * Do all this logging at the end so that
3217 * the root is at the right level.
3219 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
3220 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3221 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3224 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
3233 struct xfs_btree_cur *cur,
3234 const union xfs_btree_ptr *ptr,
3237 if (cur->bc_flags & XFS_BTREE_STAGING) {
3238 /* Update the btree root information for a per-AG fake root. */
3239 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s);
3240 cur->bc_ag.afake->af_levels += inc;
3242 cur->bc_ops->set_root(cur, ptr, inc);
3247 * Allocate a new root block, fill it in.
3249 STATIC int /* error */
3251 struct xfs_btree_cur *cur, /* btree cursor */
3252 int *stat) /* success/failure */
3254 struct xfs_btree_block *block; /* one half of the old root block */
3255 struct xfs_buf *bp; /* buffer containing block */
3256 int error; /* error return value */
3257 struct xfs_buf *lbp; /* left buffer pointer */
3258 struct xfs_btree_block *left; /* left btree block */
3259 struct xfs_buf *nbp; /* new (root) buffer */
3260 struct xfs_btree_block *new; /* new (root) btree block */
3261 int nptr; /* new value for key index, 1 or 2 */
3262 struct xfs_buf *rbp; /* right buffer pointer */
3263 struct xfs_btree_block *right; /* right btree block */
3264 union xfs_btree_ptr rptr;
3265 union xfs_btree_ptr lptr;
3267 XFS_BTREE_STATS_INC(cur, newroot);
3269 /* initialise our start point from the cursor */
3270 xfs_btree_init_ptr_from_cur(cur, &rptr);
3272 /* Allocate the new block. If we can't do it, we're toast. Give up. */
3273 error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat);
3278 XFS_BTREE_STATS_INC(cur, alloc);
3280 /* Set up the new block. */
3281 error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp);
3285 /* Set the root in the holding structure increasing the level by 1. */
3286 xfs_btree_set_root(cur, &lptr, 1);
3289 * At the previous root level there are now two blocks: the old root,
3290 * and the new block generated when it was split. We don't know which
3291 * one the cursor is pointing at, so we set up variables "left" and
3292 * "right" for each case.
3294 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
3297 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
3302 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3303 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3304 /* Our block is left, pick up the right block. */
3306 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
3308 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
3314 /* Our block is right, pick up the left block. */
3316 xfs_btree_buf_to_ptr(cur, rbp, &rptr);
3318 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
3319 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
3326 /* Fill in the new block's btree header and log it. */
3327 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
3328 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
3329 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
3330 !xfs_btree_ptr_is_null(cur, &rptr));
3332 /* Fill in the key data in the new root. */
3333 if (xfs_btree_get_level(left) > 0) {
3335 * Get the keys for the left block's keys and put them directly
3336 * in the parent block. Do the same for the right block.
3338 xfs_btree_get_node_keys(cur, left,
3339 xfs_btree_key_addr(cur, 1, new));
3340 xfs_btree_get_node_keys(cur, right,
3341 xfs_btree_key_addr(cur, 2, new));
3344 * Get the keys for the left block's records and put them
3345 * directly in the parent block. Do the same for the right
3348 xfs_btree_get_leaf_keys(cur, left,
3349 xfs_btree_key_addr(cur, 1, new));
3350 xfs_btree_get_leaf_keys(cur, right,
3351 xfs_btree_key_addr(cur, 2, new));
3353 xfs_btree_log_keys(cur, nbp, 1, 2);
3355 /* Fill in the pointer data in the new root. */
3356 xfs_btree_copy_ptrs(cur,
3357 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
3358 xfs_btree_copy_ptrs(cur,
3359 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
3360 xfs_btree_log_ptrs(cur, nbp, 1, 2);
3362 /* Fix up the cursor. */
3363 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
3364 cur->bc_levels[cur->bc_nlevels].ptr = nptr;
3366 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
3377 xfs_btree_make_block_unfull(
3378 struct xfs_btree_cur *cur, /* btree cursor */
3379 int level, /* btree level */
3380 int numrecs,/* # of recs in block */
3381 int *oindex,/* old tree index */
3382 int *index, /* new tree index */
3383 union xfs_btree_ptr *nptr, /* new btree ptr */
3384 struct xfs_btree_cur **ncur, /* new btree cursor */
3385 union xfs_btree_key *key, /* key of new block */
3390 if (xfs_btree_at_iroot(cur, level)) {
3391 struct xfs_inode *ip = cur->bc_ino.ip;
3393 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
3394 /* A root block that can be made bigger. */
3395 xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
3398 /* A root block that needs replacing */
3401 error = xfs_btree_new_iroot(cur, &logflags, stat);
3402 if (error || *stat == 0)
3405 xfs_trans_log_inode(cur->bc_tp, ip, logflags);
3411 /* First, try shifting an entry to the right neighbor. */
3412 error = xfs_btree_rshift(cur, level, stat);
3416 /* Next, try shifting an entry to the left neighbor. */
3417 error = xfs_btree_lshift(cur, level, stat);
3422 *oindex = *index = cur->bc_levels[level].ptr;
3427 * Next, try splitting the current block in half.
3429 * If this works we have to re-set our variables because we
3430 * could be in a different block now.
3432 error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
3433 if (error || *stat == 0)
3437 *index = cur->bc_levels[level].ptr;
3442 * Insert one record/level. Return information to the caller
3443 * allowing the next level up to proceed if necessary.
3447 struct xfs_btree_cur *cur, /* btree cursor */
3448 int level, /* level to insert record at */
3449 union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
3450 union xfs_btree_rec *rec, /* record to insert */
3451 union xfs_btree_key *key, /* i/o: block key for ptrp */
3452 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
3453 int *stat) /* success/failure */
3455 struct xfs_btree_block *block; /* btree block */
3456 struct xfs_buf *bp; /* buffer for block */
3457 union xfs_btree_ptr nptr; /* new block ptr */
3458 struct xfs_btree_cur *ncur = NULL; /* new btree cursor */
3459 union xfs_btree_key nkey; /* new block key */
3460 union xfs_btree_key *lkey;
3461 int optr; /* old key/record index */
3462 int ptr; /* key/record index */
3463 int numrecs;/* number of records */
3464 int error; /* error return value */
3472 * If we have an external root pointer, and we've made it to the
3473 * root level, allocate a new root block and we're done.
3475 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE &&
3476 level >= cur->bc_nlevels) {
3477 error = xfs_btree_new_root(cur, stat);
3478 xfs_btree_set_ptr_null(cur, ptrp);
3483 /* If we're off the left edge, return failure. */
3484 ptr = cur->bc_levels[level].ptr;
3492 XFS_BTREE_STATS_INC(cur, insrec);
3494 /* Get pointers to the btree buffer and block. */
3495 block = xfs_btree_get_block(cur, level, &bp);
3496 old_bn = bp ? xfs_buf_daddr(bp) : XFS_BUF_DADDR_NULL;
3497 numrecs = xfs_btree_get_numrecs(block);
3500 error = xfs_btree_check_block(cur, block, level, bp);
3504 /* Check that the new entry is being inserted in the right place. */
3505 if (ptr <= numrecs) {
3507 ASSERT(cur->bc_ops->recs_inorder(cur, rec,
3508 xfs_btree_rec_addr(cur, ptr, block)));
3510 ASSERT(cur->bc_ops->keys_inorder(cur, key,
3511 xfs_btree_key_addr(cur, ptr, block)));
3517 * If the block is full, we can't insert the new entry until we
3518 * make the block un-full.
3520 xfs_btree_set_ptr_null(cur, &nptr);
3521 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
3522 error = xfs_btree_make_block_unfull(cur, level, numrecs,
3523 &optr, &ptr, &nptr, &ncur, lkey, stat);
3524 if (error || *stat == 0)
3529 * The current block may have changed if the block was
3530 * previously full and we have just made space in it.
3532 block = xfs_btree_get_block(cur, level, &bp);
3533 numrecs = xfs_btree_get_numrecs(block);
3536 error = xfs_btree_check_block(cur, block, level, bp);
3542 * At this point we know there's room for our new entry in the block
3543 * we're pointing at.
3545 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
3548 /* It's a nonleaf. Make a hole in the keys and ptrs. */
3549 union xfs_btree_key *kp;
3550 union xfs_btree_ptr *pp;
3552 kp = xfs_btree_key_addr(cur, ptr, block);
3553 pp = xfs_btree_ptr_addr(cur, ptr, block);
3555 for (i = numrecs - ptr; i >= 0; i--) {
3556 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
3561 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
3562 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
3564 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
3568 /* Now put the new data in, bump numrecs and log it. */
3569 xfs_btree_copy_keys(cur, kp, key, 1);
3570 xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
3572 xfs_btree_set_numrecs(block, numrecs);
3573 xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
3574 xfs_btree_log_keys(cur, bp, ptr, numrecs);
3576 if (ptr < numrecs) {
3577 ASSERT(cur->bc_ops->keys_inorder(cur, kp,
3578 xfs_btree_key_addr(cur, ptr + 1, block)));
3582 /* It's a leaf. Make a hole in the records. */
3583 union xfs_btree_rec *rp;
3585 rp = xfs_btree_rec_addr(cur, ptr, block);
3587 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
3589 /* Now put the new data in, bump numrecs and log it. */
3590 xfs_btree_copy_recs(cur, rp, rec, 1);
3591 xfs_btree_set_numrecs(block, ++numrecs);
3592 xfs_btree_log_recs(cur, bp, ptr, numrecs);
3594 if (ptr < numrecs) {
3595 ASSERT(cur->bc_ops->recs_inorder(cur, rp,
3596 xfs_btree_rec_addr(cur, ptr + 1, block)));
3601 /* Log the new number of records in the btree header. */
3602 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3605 * If we just inserted into a new tree block, we have to
3606 * recalculate nkey here because nkey is out of date.
3608 * Otherwise we're just updating an existing block (having shoved
3609 * some records into the new tree block), so use the regular key update mechanism.
3612 if (bp && xfs_buf_daddr(bp) != old_bn) {
3613 xfs_btree_get_keys(cur, block, lkey);
3614 } else if (xfs_btree_needs_key_update(cur, optr)) {
3615 error = xfs_btree_update_keys(cur, level);
3621 * If we are tracking the last record in the tree and
3622 * we are at the far right edge of the tree, update it.
3624 if (xfs_btree_is_lastrec(cur, block, level)) {
3625 cur->bc_ops->update_lastrec(cur, block, rec,
3626 ptr, LASTREC_INSREC);
3630 * Return the new block number, if any.
3631 * If there is one, give back a record value and a cursor too.
3634 if (!xfs_btree_ptr_is_null(cur, &nptr)) {
3635 xfs_btree_copy_keys(cur, key, lkey, 1);
3644 xfs_btree_del_cursor(ncur, error);
3649 * Insert the record at the point referenced by cur.
3651 * A multi-level split of the tree on insert will invalidate the original
3652 * cursor. All callers of this function should assume that the cursor is
3653 * no longer valid and revalidate it.
3657 struct xfs_btree_cur *cur,
3660 int error; /* error return value */
3661 int i; /* result value, 0 for failure */
3662 int level; /* current level number in btree */
3663 union xfs_btree_ptr nptr; /* new block number (split result) */
3664 struct xfs_btree_cur *ncur; /* new cursor (split result) */
3665 struct xfs_btree_cur *pcur; /* previous level's cursor */
3666 union xfs_btree_key bkey; /* key of block to insert */
3667 union xfs_btree_key *key;
3668 union xfs_btree_rec rec; /* record to insert */
3675 xfs_btree_set_ptr_null(cur, &nptr);
3677 /* Make a key out of the record data to be inserted, and save it. */
3678 cur->bc_ops->init_rec_from_cur(cur, &rec);
3679 cur->bc_ops->init_key_from_rec(key, &rec);
3682 * Loop going up the tree, starting at the leaf level.
3683 * Stop when we don't get a split block, that must mean that
3684 * the insert is finished with this level.
3688 * Insert nrec/nptr into this level of the tree.
3689 * Note if we fail, nptr will be null.
3691 error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
3695 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
3699 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3700 xfs_btree_mark_sick(cur);
3701 error = -EFSCORRUPTED;
3707 * See if the cursor we just used should be freed. We can't free the
3708 * caller's cursor, but otherwise we should free ours if ncur is a new
3709 * cursor or we're about to be done.
3712 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
3713 /* Save the state from the cursor before we trash it */
3714 if (cur->bc_ops->update_cursor &&
3715 !(cur->bc_flags & XFS_BTREE_STAGING))
3716 cur->bc_ops->update_cursor(pcur, cur);
3717 cur->bc_nlevels = pcur->bc_nlevels;
3718 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
3720 /* If we got a new cursor, switch to it. */
3725 } while (!xfs_btree_ptr_is_null(cur, &nptr));
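/*
 * Illustrative caller sketch, not part of the original file: new records
 * are staged in cur->bc_rec before calling xfs_btree_insert(), and since
 * a multi-level split invalidates the cursor, real callers position with
 * a lookup, stage, insert, and then treat the cursor as needing
 * revalidation.  Mapping stat != 1 to -EFSCORRUPTED mirrors how existing
 * callers react; full error handling is elided.
 */
static inline int
xfs_btree_insert_example(
	struct xfs_btree_cur	*cur)
{
	int			stat;
	int			error;

	/* cur->bc_rec was filled in by the caller for this btree type. */
	error = xfs_btree_insert(cur, &stat);
	if (error)
		return error;

	/* stat == 0 means the insert did not happen. */
	return stat == 1 ? 0 : -EFSCORRUPTED;
}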
3734 * Try to merge a non-leaf block back into the inode root.
3736 * Note: the killroot name comes from the fact that we're effectively
3737 * killing the old root block. But because we can't just delete the
3738 * inode we have to copy the single block it was pointing to into the inode.
3742 xfs_btree_kill_iroot(
3743 struct xfs_btree_cur *cur)
3745 int whichfork = cur->bc_ino.whichfork;
3746 struct xfs_inode *ip = cur->bc_ino.ip;
3747 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3748 struct xfs_btree_block *block;
3749 struct xfs_btree_block *cblock;
3750 union xfs_btree_key *kp;
3751 union xfs_btree_key *ckp;
3752 union xfs_btree_ptr *pp;
3753 union xfs_btree_ptr *cpp;
3754 struct xfs_buf *cbp;
3760 union xfs_btree_ptr ptr;
3764 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
3765 ASSERT(cur->bc_nlevels > 1);
3768 * Don't deal with the case where the root block needs to be a leaf.
3769 * We're just going to turn the thing back into extents anyway.
3771 level = cur->bc_nlevels - 1;
3776 * Give up if the root has multiple children.
3778 block = xfs_btree_get_iroot(cur);
3779 if (xfs_btree_get_numrecs(block) != 1)
3782 cblock = xfs_btree_get_block(cur, level - 1, &cbp);
3783 numrecs = xfs_btree_get_numrecs(cblock);
3786 * Only do this if the next level will fit.
3787 * Then the data must be copied up to the inode,
3788 * instead of freeing the root you free the next level.
3790 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
3793 XFS_BTREE_STATS_INC(cur, killroot);
3796 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
3797 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3798 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
3799 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3802 index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
3804 xfs_iroot_realloc(cur->bc_ino.ip, index,
3805 cur->bc_ino.whichfork);
3806 block = ifp->if_broot;
3809 be16_add_cpu(&block->bb_numrecs, index);
3810 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
3812 kp = xfs_btree_key_addr(cur, 1, block);
3813 ckp = xfs_btree_key_addr(cur, 1, cblock);
3814 xfs_btree_copy_keys(cur, kp, ckp, numrecs);
3816 pp = xfs_btree_ptr_addr(cur, 1, block);
3817 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3819 for (i = 0; i < numrecs; i++) {
3820 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
3825 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
3827 error = xfs_btree_free_block(cur, cbp);
3831 cur->bc_levels[level - 1].bp = NULL;
3832 be16_add_cpu(&block->bb_level, -1);
3833 xfs_trans_log_inode(cur->bc_tp, ip,
3834 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
3841 * Kill the current root node, and replace it with its only child node.
3844 xfs_btree_kill_root(
3845 struct xfs_btree_cur *cur,
3848 union xfs_btree_ptr *newroot)
3852 XFS_BTREE_STATS_INC(cur, killroot);
3855 * Update the root pointer, decreasing the level by 1 and then
3856 * free the old root.
3858 xfs_btree_set_root(cur, newroot, -1);
3860 error = xfs_btree_free_block(cur, bp);
3864 cur->bc_levels[level].bp = NULL;
3865 cur->bc_levels[level].ra = 0;
3872 xfs_btree_dec_cursor(
3873 struct xfs_btree_cur *cur,
3881 error = xfs_btree_decrement(cur, level, &i);
3891 * Single level of the btree record deletion routine.
3892 * Delete record pointed to by cur/level.
3893 * Remove the record from its block then rebalance the tree.
3894 * Return 0 for error, 1 for done, 2 to go on to the next level.
3896 STATIC int /* error */
3898 struct xfs_btree_cur *cur, /* btree cursor */
3899 int level, /* level removing record from */
3900 int *stat) /* fail/done/go-on */
3902 struct xfs_btree_block *block; /* btree block */
3903 union xfs_btree_ptr cptr; /* current block ptr */
3904 struct xfs_buf *bp; /* buffer for block */
3905 int error; /* error return value */
3906 int i; /* loop counter */
3907 union xfs_btree_ptr lptr; /* left sibling block ptr */
3908 struct xfs_buf *lbp; /* left buffer pointer */
3909 struct xfs_btree_block *left; /* left btree block */
3910 int lrecs = 0; /* left record count */
3911 int ptr; /* key/record index */
3912 union xfs_btree_ptr rptr; /* right sibling block ptr */
3913 struct xfs_buf *rbp; /* right buffer pointer */
3914 struct xfs_btree_block *right; /* right btree block */
3915 struct xfs_btree_block *rrblock; /* right-right btree block */
3916 struct xfs_buf *rrbp; /* right-right buffer pointer */
3917 int rrecs = 0; /* right record count */
3918 struct xfs_btree_cur *tcur; /* temporary btree cursor */
3919 int numrecs; /* temporary numrec count */
3923 /* Get the index of the entry being deleted, check for nothing there. */
3924 ptr = cur->bc_levels[level].ptr;
3930 /* Get the buffer & block containing the record or key/ptr. */
3931 block = xfs_btree_get_block(cur, level, &bp);
3932 numrecs = xfs_btree_get_numrecs(block);
3935 error = xfs_btree_check_block(cur, block, level, bp);
3940 /* Fail if we're off the end of the block. */
3941 if (ptr > numrecs) {
3946 XFS_BTREE_STATS_INC(cur, delrec);
3947 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
3949 /* Excise the entries being deleted. */
3951 /* It's a nonleaf. Operate on keys and ptrs. */
3952 union xfs_btree_key *lkp;
3953 union xfs_btree_ptr *lpp;
3955 lkp = xfs_btree_key_addr(cur, ptr + 1, block);
3956 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
3958 for (i = 0; i < numrecs - ptr; i++) {
3959 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
3964 if (ptr < numrecs) {
3965 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
3966 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
3967 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
3968 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
3971 /* It's a leaf. Operate on records. */
3972 if (ptr < numrecs) {
3973 xfs_btree_shift_recs(cur,
3974 xfs_btree_rec_addr(cur, ptr + 1, block),
3976 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
3981 * Decrement and log the number of entries in the block.
3983 xfs_btree_set_numrecs(block, --numrecs);
3984 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3987 * If we are tracking the last record in the tree and
3988 * we are at the far right edge of the tree, update it.
3990 if (xfs_btree_is_lastrec(cur, block, level)) {
3991 cur->bc_ops->update_lastrec(cur, block, NULL,
3992 ptr, LASTREC_DELREC);
3996 * We're at the root level. First, shrink the root block in-memory.
3997 * Try to get rid of the next level down. If we can't then there's
3998 * nothing left to do.
4000 if (xfs_btree_at_iroot(cur, level)) {
4001 xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork);
4003 error = xfs_btree_kill_iroot(cur);
4007 error = xfs_btree_dec_cursor(cur, level, stat);
4015 * If this is the root level, and there's only one entry left, and it's
4016 * NOT the leaf level, then we can get rid of this level.
4018 if (level == cur->bc_nlevels - 1) {
4019 if (numrecs == 1 && level > 0) {
4020 union xfs_btree_ptr *pp;
4022 * pp is still set to the first pointer in the block.
4023 * Make it the new root of the btree.
4025 pp = xfs_btree_ptr_addr(cur, 1, block);
4026 error = xfs_btree_kill_root(cur, bp, level, pp);
4029 } else if (level > 0) {
4030 error = xfs_btree_dec_cursor(cur, level, stat);
4039 * If we deleted the leftmost entry in the block, update the
4040 * key values above us in the tree.
4042 if (xfs_btree_needs_key_update(cur, ptr)) {
4043 error = xfs_btree_update_keys(cur, level);
4049 * If the number of records remaining in the block is at least
4050 * the minimum, we're done.
4052 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
4053 error = xfs_btree_dec_cursor(cur, level, stat);
4060 * Otherwise, we have to move some records around to keep the
4061 * tree balanced. Look at the left and right sibling blocks to
4062 * see if we can re-balance by moving only one record.
4064 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4065 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
4067 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
4069 * The root has one child; we need a chance to copy its contents
4070 * into the root and delete the child. We can't go up to the next
4071 * level; there's nothing to delete there.
4073 if (xfs_btree_ptr_is_null(cur, &rptr) &&
4074 xfs_btree_ptr_is_null(cur, &lptr) &&
4075 level == cur->bc_nlevels - 2) {
4076 error = xfs_btree_kill_iroot(cur);
4078 error = xfs_btree_dec_cursor(cur, level, stat);
4085 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
4086 !xfs_btree_ptr_is_null(cur, &lptr));
4089 * Duplicate the cursor so our btree manipulations here won't
4090 * disrupt the next level up.
4092 error = xfs_btree_dup_cursor(cur, &tcur);
4097 * If there's a right sibling, see if it's ok to shift an entry
4100 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
4102 * Move the temp cursor to the last entry in the next block.
4103 * Actually any entry but the first would suffice.
4105 i = xfs_btree_lastrec(tcur, level);
4106 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4107 xfs_btree_mark_sick(cur);
4108 error = -EFSCORRUPTED;
4112 error = xfs_btree_increment(tcur, level, &i);
4115 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4116 xfs_btree_mark_sick(cur);
4117 error = -EFSCORRUPTED;
4121 i = xfs_btree_lastrec(tcur, level);
4122 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4123 xfs_btree_mark_sick(cur);
4124 error = -EFSCORRUPTED;
4128 /* Grab a pointer to the block. */
4129 right = xfs_btree_get_block(tcur, level, &rbp);
4131 error = xfs_btree_check_block(tcur, right, level, rbp);
4135 /* Grab the current block number, for future use. */
4136 xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
4139 * If right block is full enough so that removing one entry
4140 * won't make it too empty, and left-shifting an entry out
4141 * of right to us works, we're done.
4143 if (xfs_btree_get_numrecs(right) - 1 >=
4144 cur->bc_ops->get_minrecs(tcur, level)) {
4145 error = xfs_btree_lshift(tcur, level, &i);
4149 ASSERT(xfs_btree_get_numrecs(block) >=
4150 cur->bc_ops->get_minrecs(tcur, level));
4152 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
4155 error = xfs_btree_dec_cursor(cur, level, stat);
4163 * Otherwise, grab the number of records in right for
4164 * future reference, and fix up the temp cursor to point
4165 * to our block again (last record).
4167 rrecs = xfs_btree_get_numrecs(right);
4168 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
4169 i = xfs_btree_firstrec(tcur, level);
4170 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4171 xfs_btree_mark_sick(cur);
4172 error = -EFSCORRUPTED;
4176 error = xfs_btree_decrement(tcur, level, &i);
4179 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4180 xfs_btree_mark_sick(cur);
4181 error = -EFSCORRUPTED;
4188 * If there's a left sibling, see if it's ok to shift an entry
4191 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
4193 * Move the temp cursor to the first entry in the
4196 i = xfs_btree_firstrec(tcur, level);
4197 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4198 xfs_btree_mark_sick(cur);
4199 error = -EFSCORRUPTED;
4203 error = xfs_btree_decrement(tcur, level, &i);
4206 i = xfs_btree_firstrec(tcur, level);
4207 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4208 xfs_btree_mark_sick(cur);
4209 error = -EFSCORRUPTED;
4213 /* Grab a pointer to the block. */
4214 left = xfs_btree_get_block(tcur, level, &lbp);
4216 error = xfs_btree_check_block(cur, left, level, lbp);
4220 /* Grab the current block number, for future use. */
4221 xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
4224 * If left block is full enough so that removing one entry
4225 * won't make it too empty, and right-shifting an entry out
4226 * of left to us works, we're done.
4228 if (xfs_btree_get_numrecs(left) - 1 >=
4229 cur->bc_ops->get_minrecs(tcur, level)) {
4230 error = xfs_btree_rshift(tcur, level, &i);
4234 ASSERT(xfs_btree_get_numrecs(block) >=
4235 cur->bc_ops->get_minrecs(tcur, level));
4236 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
4239 cur->bc_levels[0].ptr++;
4247 * Otherwise, grab the number of records in left for future reference.
4250 lrecs = xfs_btree_get_numrecs(left);
4253 /* Delete the temp cursor, we're done with it. */
4254 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
4257 /* If here, we need to do a join to keep the tree balanced. */
4258 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
4260 if (!xfs_btree_ptr_is_null(cur, &lptr) &&
4261 lrecs + xfs_btree_get_numrecs(block) <=
4262 cur->bc_ops->get_maxrecs(cur, level)) {
4264 * Set "right" to be the starting block,
4265 * "left" to be the left neighbor.
4270 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
4275 * If that won't work, see if we can join with the right neighbor block.
4277 } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
4278 rrecs + xfs_btree_get_numrecs(block) <=
4279 cur->bc_ops->get_maxrecs(cur, level)) {
4281 * Set "left" to be the starting block,
4282 * "right" to be the right neighbor.
4287 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
4292 * Otherwise, we can't fix the imbalance.
4293 * Just return. This is probably a logic error, but it's not fatal.
4296 error = xfs_btree_dec_cursor(cur, level, stat);
4302 rrecs = xfs_btree_get_numrecs(right);
4303 lrecs = xfs_btree_get_numrecs(left);
4306 * We're now going to join "left" and "right" by moving all the stuff
4307 * in "right" to "left" and deleting "right".
4309 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
4311 /* It's a non-leaf. Move keys and pointers. */
4312 union xfs_btree_key *lkp; /* left btree key */
4313 union xfs_btree_ptr *lpp; /* left address pointer */
4314 union xfs_btree_key *rkp; /* right btree key */
4315 union xfs_btree_ptr *rpp; /* right address pointer */
4317 lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
4318 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
4319 rkp = xfs_btree_key_addr(cur, 1, right);
4320 rpp = xfs_btree_ptr_addr(cur, 1, right);
4322 for (i = 1; i < rrecs; i++) {
4323 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
4328 xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
4329 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
4331 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
4332 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
4334 /* It's a leaf. Move records. */
4335 union xfs_btree_rec *lrp; /* left record pointer */
4336 union xfs_btree_rec *rrp; /* right record pointer */
4338 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
4339 rrp = xfs_btree_rec_addr(cur, 1, right);
4341 xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
4342 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
4345 XFS_BTREE_STATS_INC(cur, join);
4348 * Fix up the number of records and right block pointer in the
4349 * surviving block, and log it.
4351 xfs_btree_set_numrecs(left, lrecs + rrecs);
4352 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
4353 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4354 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
4356 /* If there is a right sibling, point it to the remaining block. */
4357 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4358 if (!xfs_btree_ptr_is_null(cur, &cptr)) {
4359 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
4362 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
4363 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
4366 /* Free the deleted block. */
4367 error = xfs_btree_free_block(cur, rbp);
4372 * If we joined with the left neighbor, set the buffer in the
4373 * cursor to the left block, and fix up the index.
4376 cur->bc_levels[level].bp = lbp;
4377 cur->bc_levels[level].ptr += lrecs;
4378 cur->bc_levels[level].ra = 0;
4381 * If we joined with the right neighbor and there's a level above
4382 * us, increment the cursor at that level.
4384 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE ||
4385 level + 1 < cur->bc_nlevels) {
4386 error = xfs_btree_increment(cur, level + 1, &i);
4392 * Readjust the ptr at this level if it's not a leaf, since it's
4393 * still pointing at the deletion point, which makes the cursor
4394 * inconsistent. If this makes the ptr 0, the caller fixes it up.
4395 * We can't use decrement because it would change the next level up.
4398 cur->bc_levels[level].ptr--;
4401 * We combined blocks, so we have to update the parent keys if the
4402 * btree supports overlapped intervals. However,
4403 * bc_levels[level + 1].ptr points to the old block so that the caller
4404 * knows which record to delete. Therefore, the caller must be savvy
4405 * enough to call updkeys for us if we return stat == 2. The other
4406 * exit points from this function don't require deletions further up
4407 * the tree, so they can call updkeys directly.
4410 /* Return value means the next level up has something to do. */
4416 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
4421 * Delete the record pointed to by cur.
4422 * The cursor refers to the place where the record was (could be inserted)
4423 * when the operation returns.
4427 struct xfs_btree_cur *cur,
4428 int *stat) /* success/failure */
4430 int error; /* error return value */
4433 bool joined = false;
4436 * Go up the tree, starting at leaf level.
4438 * If 2 is returned then a join was done; go to the next level.
4439 * Otherwise we are done.
4441 for (level = 0, i = 2; i == 2; level++) {
4442 error = xfs_btree_delrec(cur, level, &i);
4450 * If we combined blocks as part of deleting the record, delrec won't
4451 * have updated the parent high keys so we have to do that here.
4453 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) {
4454 error = xfs_btree_updkeys_force(cur, 0);
4460 for (level = 1; level < cur->bc_nlevels; level++) {
4461 if (cur->bc_levels[level].ptr == 0) {
4462 error = xfs_btree_decrement(cur, level, &i);
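/*
 * Illustrative caller sketch, not part of the original file: deletion
 * pairs a lookup with xfs_btree_delete().  stat == 0 from the lookup
 * means "no such record" (the -ENOENT mapping here is an illustrative
 * choice); stat == 0 from the delete is treated as corruption, as
 * existing callers do.  cur->bc_rec is assumed to hold the search key.
 */
static inline int
xfs_btree_delete_example(
	struct xfs_btree_cur	*cur)
{
	int			stat;
	int			error;

	/* Position the cursor exactly at the record to remove. */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (error)
		return error;
	if (stat != 1)
		return -ENOENT;

	/* Remove the record and rebalance as needed. */
	error = xfs_btree_delete(cur, &stat);
	if (error)
		return error;
	return stat == 1 ? 0 : -EFSCORRUPTED;
}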
4477 * Get the data from the pointed-to record.
4481 struct xfs_btree_cur *cur, /* btree cursor */
4482 union xfs_btree_rec **recp, /* output: btree record */
4483 int *stat) /* output: success/failure */
4485 struct xfs_btree_block *block; /* btree block */
4486 struct xfs_buf *bp; /* buffer pointer */
4487 int ptr; /* record number */
4489 int error; /* error return value */
4492 ptr = cur->bc_levels[0].ptr;
4493 block = xfs_btree_get_block(cur, 0, &bp);
4496 error = xfs_btree_check_block(cur, block, 0, bp);
4502 * Off the right end or left end, return failure.
4504 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4510 * Point to the record and extract its data.
4512 *recp = xfs_btree_rec_addr(cur, ptr, block);
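/*
 * Illustrative iteration sketch, not part of the original file: the
 * classic consumer of xfs_btree_get_rec() positions at the first
 * candidate with a >= lookup and walks right until the records run out.
 * @fn is a hypothetical per-record callback; decoding the on-disk
 * record is btree-specific and elided here.
 */
static inline int
xfs_btree_iterate_example(
	struct xfs_btree_cur	*cur,
	int			(*fn)(union xfs_btree_rec *rec))
{
	union xfs_btree_rec	*rec;
	int			stat;
	int			error;

	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &stat);
	while (!error && stat == 1) {
		error = xfs_btree_get_rec(cur, &rec, &stat);
		if (error || stat != 1)
			break;
		error = fn(rec);
		if (error)
			break;
		/* Move to the next record; stat == 0 ends the walk. */
		error = xfs_btree_increment(cur, 0, &stat);
	}
	return error;
}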
4517 /* Visit a block in a btree. */
4519 xfs_btree_visit_block(
4520 struct xfs_btree_cur *cur,
4522 xfs_btree_visit_blocks_fn fn,
4525 struct xfs_btree_block *block;
4527 union xfs_btree_ptr rptr, bufptr;
4530 /* do right sibling readahead */
4531 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4532 block = xfs_btree_get_block(cur, level, &bp);
4534 /* process the block */
4535 error = fn(cur, level, data);
4539 /* now read rh sibling block for next iteration */
4540 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4541 if (xfs_btree_ptr_is_null(cur, &rptr))
4545 * We only visit blocks once in this walk, so we have to avoid the
4546 * internal xfs_btree_lookup_get_block() optimisation where it will
4547 * return the same block without checking if the right sibling points
4548 * back to us and creates a cyclic reference in the btree.
4550 xfs_btree_buf_to_ptr(cur, bp, &bufptr);
4551 if (xfs_btree_ptrs_equal(cur, &rptr, &bufptr)) {
4552 xfs_btree_mark_sick(cur);
4553 return -EFSCORRUPTED;
4556 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4560 /* Visit every block in a btree. */
4562 xfs_btree_visit_blocks(
4563 struct xfs_btree_cur *cur,
4564 xfs_btree_visit_blocks_fn fn,
4568 union xfs_btree_ptr lptr;
4570 struct xfs_btree_block *block = NULL;
4573 xfs_btree_init_ptr_from_cur(cur, &lptr);
4575 /* for each level */
4576 for (level = cur->bc_nlevels - 1; level >= 0; level--) {
4577 /* grab the left hand block */
4578 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
4582 /* readahead the left most block for the next level down */
4584 union xfs_btree_ptr *ptr;
4586 ptr = xfs_btree_ptr_addr(cur, 1, block);
4587 xfs_btree_readahead_ptr(cur, ptr, 1);
4589 /* save for the next iteration of the loop */
4590 xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
4592 if (!(flags & XFS_BTREE_VISIT_LEAVES))
4594 } else if (!(flags & XFS_BTREE_VISIT_RECORDS)) {
4598 /* for each buffer in the level */
4600 error = xfs_btree_visit_block(cur, level, fn, data);
4603 if (error != -ENOENT)
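/*
 * Illustrative visitor sketch, not part of the original file: a callback
 * with the xfs_btree_visit_blocks_fn signature that simply counts the
 * blocks it is shown, in the same spirit as the owner-change visitor
 * below.  The accumulator rides along through the opaque @data pointer.
 */
static int
xfs_btree_count_blocks_example(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	xfs_extlen_t		*blocks = data;

	(*blocks)++;
	return 0;
}

/*
 * A caller would pass XFS_BTREE_VISIT_ALL to see every level:
 *
 *	xfs_extlen_t	blocks = 0;
 *
 *	error = xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_example,
 *			XFS_BTREE_VISIT_ALL, &blocks);
 */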
4611 * Change the owner of a btree.
4613 * The mechanism we use here is ordered buffer logging. Because we don't know
4614 * how many buffers we are going to need to modify, we don't really want to
4615 * have to make transaction reservations for the worst case of every buffer in a
4616 * full size btree as that may be more space than we can fit in the log....
4618 * We do the btree walk in the most optimal manner possible - we have sibling
4619 * pointers so we can just walk all the blocks on each level from left to right
4620 * in a single pass, and then move to the next level and do the same. We can
4621 * also do readahead on the sibling pointers to get IO moving more quickly,
4622 * though for slow disks this is unlikely to make much difference to performance
4623 * as the amount of CPU work we have to do before moving to the next block is minimal.
4626 * For each btree block that we load, modify the owner appropriately, set the
4627 * buffer as an ordered buffer and log it appropriately. We need to ensure that
4628 * we mark the region we change dirty so that if the buffer is relogged in
4629 * a subsequent transaction the changes we make here as an ordered buffer are
4630 * correctly relogged in that transaction. If we are in recovery context, then
4631 * just queue the modified buffer as delayed write buffer so the transaction
4632 * recovery completion writes the changes to disk.
4634 struct xfs_btree_block_change_owner_info {
4636 struct list_head *buffer_list;
4640 xfs_btree_block_change_owner(
4641 struct xfs_btree_cur *cur,
4645 struct xfs_btree_block_change_owner_info *bbcoi = data;
4646 struct xfs_btree_block *block;
4649 /* modify the owner */
4650 block = xfs_btree_get_block(cur, level, &bp);
4651 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
4652 if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
4654 block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
4656 if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
4658 block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
4662 * If the block is a root block hosted in an inode, we might not have a
4663 * buffer pointer here and we shouldn't attempt to log the change as the
4664 * information is already held in the inode and discarded when the root
4665 * block is formatted into the on-disk inode fork. We still change it,
4666 * though, so everything is consistent in memory.
4669 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
4670 ASSERT(level == cur->bc_nlevels - 1);
4675 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
4676 xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
4680 xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
4687 xfs_btree_change_owner(
4688 struct xfs_btree_cur *cur,
4690 struct list_head *buffer_list)
4692 struct xfs_btree_block_change_owner_info bbcoi;
4694 bbcoi.new_owner = new_owner;
4695 bbcoi.buffer_list = buffer_list;
4697 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4698 XFS_BTREE_VISIT_ALL, &bbcoi);
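/*
 * Illustrative sketch, not part of the original file: a recovery-style
 * caller collects the modified buffers on a local delwri list and writes
 * them back itself, since there is no transaction to do it.
 * xfs_buf_delwri_submit()/xfs_buf_delwri_cancel() are the standard
 * delwri-list helpers; the error handling shown is an assumption.
 */
static inline int
xfs_btree_change_owner_example(
	struct xfs_btree_cur	*cur,
	uint64_t		new_owner)
{
	LIST_HEAD(buffer_list);
	int			error;

	error = xfs_btree_change_owner(cur, new_owner, &buffer_list);
	if (error) {
		xfs_buf_delwri_cancel(&buffer_list);
		return error;
	}
	return xfs_buf_delwri_submit(&buffer_list);
}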

/* Verify the v5 fields of a long-format btree block. */
xfs_failaddr_t
xfs_btree_fsblock_v5hdr_verify(
	struct xfs_buf		*bp,
	uint64_t		owner)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	if (!xfs_has_crc(mp))
		return __this_address;
	if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (block->bb_u.l.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp)))
		return __this_address;
	if (owner != XFS_RMAP_OWN_UNKNOWN &&
	    be64_to_cpu(block->bb_u.l.bb_owner) != owner)
		return __this_address;
	return NULL;
}

/* Verify a long-format btree block. */
xfs_failaddr_t
xfs_btree_fsblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_fsblock_t		fsb;
	xfs_failaddr_t		fa;

	ASSERT(!xfs_buftarg_is_mem(bp->b_target));

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return __this_address;

	/* sibling pointer verification */
	fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	fa = xfs_btree_check_fsblock_siblings(mp, fsb,
			block->bb_u.l.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_fsblock_siblings(mp, fsb,
				block->bb_u.l.bb_rightsib);
	return fa;
}

/* Verify an in-memory btree block. */
xfs_failaddr_t
xfs_btree_memblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buftarg	*btp = bp->b_target;
	xfs_failaddr_t		fa;
	xfbno_t			bno;

	ASSERT(xfs_buftarg_is_mem(bp->b_target));

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return __this_address;

	/* sibling pointer verification */
	bno = xfs_daddr_to_xfbno(xfs_buf_daddr(bp));
	fa = xfs_btree_check_memblock_siblings(btp, bno,
			block->bb_u.l.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_memblock_siblings(btp, bno,
				block->bb_u.l.bb_rightsib);
	return fa;
}

/**
 * xfs_btree_agblock_v5hdr_verify() -- verify the v5 fields of a short-format
 *				      btree block
 *
 * @bp: buffer containing the btree block
 */
xfs_failaddr_t
xfs_btree_agblock_v5hdr_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;

	if (!xfs_has_crc(mp))
		return __this_address;
	if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp)))
		return __this_address;
	if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
		return __this_address;
	return NULL;
}

/**
 * xfs_btree_agblock_verify() -- verify a short-format btree block
 *
 * @bp: buffer containing the btree block
 * @max_recs: maximum records allowed in this btree node
 */
xfs_failaddr_t
xfs_btree_agblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_agblock_t		agbno;
	xfs_failaddr_t		fa;

	ASSERT(!xfs_buftarg_is_mem(bp->b_target));

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return __this_address;

	/* sibling pointer verification */
	agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
	fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno,
			block->bb_u.s.bb_leftsib);
	if (!fa)
		fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno,
				block->bb_u.s.bb_rightsib);
	return fa;
}

/*
 * For the given limits on leaf and keyptr records per block, calculate the
 * height of the tree needed to index the number of leaf records.
 */
unsigned int
xfs_btree_compute_maxlevels(
	const unsigned int	*limits,
	unsigned long long	records)
{
	unsigned long long	level_blocks = howmany_64(records, limits[0]);
	unsigned int		height = 1;

	while (level_blocks > 1) {
		level_blocks = howmany_64(level_blocks, limits[1]);
		height++;
	}

	return height;
}
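
/*
 * Worked example (hypothetical limits, for illustration only): with
 * limits = { 125, 250 } (125 records per leaf block, 250 keyptrs per node
 * block) and records = 1,000,000, the loop above computes
 * ceil(1000000/125) = 8000 leaf blocks, then ceil(8000/250) = 32 node
 * blocks, then ceil(32/250) = 1 root block, so the computed height is 3.
 */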

/*
 * For the given limits on leaf and keyptr records per block, calculate the
 * number of blocks needed to index the given number of leaf records.
 */
unsigned long long
xfs_btree_calc_size(
	const unsigned int	*limits,
	unsigned long long	records)
{
	unsigned long long	level_blocks = howmany_64(records, limits[0]);
	unsigned long long	blocks = level_blocks;

	while (level_blocks > 1) {
		level_blocks = howmany_64(level_blocks, limits[1]);
		blocks += level_blocks;
	}

	return blocks;
}
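
/*
 * Worked example (same hypothetical limits as above): for
 * limits = { 125, 250 } and records = 1,000,000, the level sizes are
 * 8000, 32 and 1 blocks, so the total is 8000 + 32 + 1 = 8033 blocks
 * needed to index all of the records.
 */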

/*
 * Given a number of available blocks for the btree to consume with records and
 * pointers, calculate the height of the tree needed to index all the records
 * that space can hold based on the number of pointers each interior node
 * holds.
 *
 * We start by assuming a single level tree consumes a single block, then track
 * the number of blocks each node level consumes until we no longer have space
 * to store the next node level. At this point, we are indexing all the leaf
 * blocks in the space, and there's no more free space to split the tree any
 * further. That's our maximum btree height.
 */
unsigned int
xfs_btree_space_to_height(
	const unsigned int	*limits,
	unsigned long long	leaf_blocks)
{
	/*
	 * The root btree block can have fewer than minrecs pointers in it
	 * because the tree might not be big enough to require that amount of
	 * fanout. Hence it has a minimum size of 2 pointers, not limits[1].
	 */
	unsigned long long	node_blocks = 2;
	unsigned long long	blocks_left = leaf_blocks - 1;
	unsigned int		height = 1;

	if (leaf_blocks < 1)
		return 0;

	while (node_blocks < blocks_left) {
		blocks_left -= node_blocks;
		node_blocks *= limits[1];
		height++;
	}

	return height;
}
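
/*
 * Worked example (hypothetical fanout, for illustration only): with
 * limits[1] = 250 and leaf_blocks = 10,000, we start with node_blocks = 2,
 * blocks_left = 9,999 and height = 1. The first pass leaves 9,997 blocks
 * and reserves 2 * 250 = 500 blocks for the next node level (height 2);
 * the second pass leaves 9,497 blocks and would need 500 * 250 = 125,000
 * node blocks, which no longer fits, so the loop stops at a maximum
 * height of 3.
 */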

/*
 * Query a regular btree for all records overlapping a given interval.
 * Start with a LE lookup of the key of low_rec and return all records
 * until we find a record with a key greater than the key of high_rec.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*low_key,
	const union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		*recp;
	union xfs_btree_key		rec_key;
	int				stat;
	bool				firstrec = true;
	int				error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost record. The btree cursor must be set
	 * to the low record used to generate low_key.
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing? See if there's anything to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Find the record. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/* Skip if low_key > high_key(rec). */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			if (xfs_btree_keycmp_gt(cur, low_key, &rec_key))
				goto advloop;
		}

		/* Stop if low_key(rec) > high_key. */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		if (xfs_btree_keycmp_gt(cur, &rec_key, high_key))
			break;

		/* Callback */
		error = fn(cur, recp, priv);
		if (error)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}

/*
 * Query an overlapped interval btree for all records overlapping a given
 * interval. This function roughly follows the algorithm given in
 * "Interval Trees" of _Introduction to Algorithms_, which is section
 * 14.3 in the 2nd and 3rd editions.
 *
 * First, generate keys for the low and high records passed in.
 *
 * For any leaf node, generate the high and low keys for the record.
 * If the record keys overlap with the query low/high keys, pass the
 * record to the function iterator.
 *
 * For any internal node, compare the low and high keys of each
 * pointer against the query low/high keys. If there's an overlap,
 * follow the pointer.
 *
 * As an optimization, we stop scanning a block when we find a low key
 * that is greater than the query's high key.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*low_key,
	const union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_key		rec_key;
	union xfs_btree_key		rec_hkey;
	union xfs_btree_key		*lkp;
	union xfs_btree_key		*hkp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	int				level;
	struct xfs_buf			*bp;
	int				i;
	int				error;

	/* Load the root of the btree. */
	level = cur->bc_nlevels - 1;
	xfs_btree_init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_levels[level].ptr = 1;

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of node, pop back towards the root. */
		if (cur->bc_levels[level].ptr >
					be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_levels[level + 1].ptr++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Handle a leaf node. */
			recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr,
					block);

			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			cur->bc_ops->init_key_from_rec(&rec_key, recp);

			/*
			 * If (query's high key < record's low key), then there
			 * are no more interesting records in this block. Pop
			 * up to the leaf level to find more record blocks.
			 *
			 * If (record's high key >= query's low key) and
			 *    (query's high key >= record's low key), then
			 * this record overlaps the query range; callback.
			 */
			if (xfs_btree_keycmp_lt(cur, high_key, &rec_key))
				goto pop_up;
			if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) {
				error = fn(cur, recp, priv);
				if (error)
					break;
			}
			cur->bc_levels[level].ptr++;
			continue;
		}

		/* Handle an internal node. */
		lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr,
				block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block);

		/*
		 * If (query's high key < pointer's low key), then there are no
		 * more interesting keys in this block. Pop up one leaf level
		 * to continue looking for records.
		 *
		 * If (pointer's high key >= query's low key) and
		 *    (query's high key >= pointer's low key), then
		 * this record overlaps the query range; follow pointer.
		 */
		if (xfs_btree_keycmp_lt(cur, high_key, lkp))
			goto pop_up;
		if (xfs_btree_keycmp_ge(cur, hkp, low_key)) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_levels[level].ptr = 1;
			continue;
		}
		cur->bc_levels[level].ptr++;
	}

out:
	/*
	 * If we don't end this function with the cursor pointing at a record
	 * block, a subsequent non-error cursor deletion will not release
	 * node-level buffers, causing a buffer leak. This is quite possible
	 * with a zero-results range query, so release the buffers if we
	 * failed to return any results.
	 */
	if (cur->bc_levels[0].bp == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_levels[i].bp) {
				xfs_trans_brelse(cur->bc_tp,
						cur->bc_levels[i].bp);
				cur->bc_levels[i].bp = NULL;
				cur->bc_levels[i].ptr = 0;
				cur->bc_levels[i].ra = 0;
			}
		}
	}

	return error;
}

/* Compute the btree key for an incore record. */
void
xfs_btree_key_from_irec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*key,
	const union xfs_btree_irec	*irec)
{
	union xfs_btree_rec		rec;

	cur->bc_rec = *irec;
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);
}

/*
 * Query a btree for all records overlapping a given interval of keys. The
 * supplied function will be called with each record found; return one of the
 * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
 * code. This function returns -ECANCELED, zero, or a negative error code.
 */
int
xfs_btree_query_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low_rec,
	const union xfs_btree_irec	*high_rec,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_key		low_key;
	union xfs_btree_key		high_key;

	/* Find the keys of both ends of the interval. */
	xfs_btree_key_from_irec(cur, &high_key, high_rec);
	xfs_btree_key_from_irec(cur, &low_key, low_rec);

	/* Enforce low key <= high key. */
	if (!xfs_btree_keycmp_le(cur, &low_key, &high_key))
		return -EINVAL;

	if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
		return xfs_btree_simple_query_range(cur, &low_key,
				&high_key, fn, priv);
	return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
			fn, priv);
}
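
/*
 * Example callback (hypothetical, for illustration only): count the records
 * in a range by accumulating into the priv pointer and returning zero to
 * continue the iteration:
 *
 *	static int
 *	xfs_example_count_helper(
 *		struct xfs_btree_cur		*cur,
 *		const union xfs_btree_rec	*rec,
 *		void				*priv)
 *	{
 *		unsigned long long		*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 * Returning -ECANCELED from the callback aborts the walk, and
 * xfs_btree_query_range() hands that -ECANCELED back to its caller.
 */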

/* Query a btree for all records. */
int
xfs_btree_query_all(
	struct xfs_btree_cur		*cur,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_key		low_key;
	union xfs_btree_key		high_key;

	memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
	memset(&low_key, 0, sizeof(low_key));
	memset(&high_key, 0xFF, sizeof(high_key));

	return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
}

static int
xfs_btree_count_blocks_helper(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	xfs_extlen_t		*blocks = data;

	(*blocks)++;
	return 0;
}

/* Count the blocks in a btree and return the result in *blocks. */
int
xfs_btree_count_blocks(
	struct xfs_btree_cur	*cur,
	xfs_extlen_t		*blocks)
{
	*blocks = 0;
	return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
			XFS_BTREE_VISIT_ALL, blocks);
}

/* Compare two btree pointers. */
int64_t
xfs_btree_diff_two_ptrs(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*a,
	const union xfs_btree_ptr	*b)
{
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
	return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
}

struct xfs_btree_has_records {
	/* Keys for the start and end of the range we want to know about. */
	union xfs_btree_key		start_key;
	union xfs_btree_key		end_key;

	/* Mask for key comparisons, if desired. */
	const union xfs_btree_key	*key_mask;

	/* Highest record key we've seen so far. */
	union xfs_btree_key		high_key;

	enum xbtree_recpacking		outcome;
};

STATIC int
xfs_btree_has_records_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	union xfs_btree_key		rec_key;
	union xfs_btree_key		rec_high_key;
	struct xfs_btree_has_records	*info = priv;
	enum xbtree_key_contig		key_contig;

	cur->bc_ops->init_key_from_rec(&rec_key, rec);

	if (info->outcome == XBTREE_RECPACKING_EMPTY) {
		info->outcome = XBTREE_RECPACKING_SPARSE;

		/*
		 * If the first record we find does not overlap the start key,
		 * then there is a hole at the start of the search range.
		 * Classify this as sparse and stop immediately.
		 */
		if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
				info->key_mask))
			return -ECANCELED;
	} else {
		/*
		 * If a subsequent record does not overlap with any record
		 * we've seen so far, there is a hole in the middle of the
		 * search range. Classify this as sparse and stop.
		 * If the keys overlap and this btree does not allow overlap,
		 * signal corruption.
		 */
		key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
					&rec_key, info->key_mask);
		if (key_contig == XBTREE_KEY_OVERLAP &&
				!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
			return -EFSCORRUPTED;
		if (key_contig == XBTREE_KEY_GAP)
			return -ECANCELED;
	}

	/*
	 * If high_key(rec) is larger than any other high key we've seen,
	 * remember it for later.
	 */
	cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
	if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
			info->key_mask))
		info->high_key = rec_high_key; /* struct copy */

	return 0;
}

/*
 * Scan part of the keyspace of a btree and tell us if that keyspace does not
 * map to any records; is fully mapped to records; or is partially mapped to
 * records. This is the btree record equivalent to determining if a file is
 * sparse.
 *
 * For most btree types, the record scan should use all available btree key
 * fields to compare the keys encountered. These callers should pass NULL for
 * @mask. However, some callers (e.g. scanning physical space in the rmapbt)
 * want to ignore some part of the btree record keyspace when performing the
 * comparison. These callers should pass in a union xfs_btree_key object with
 * the fields that *should* be a part of the comparison set to any nonzero
 * value, and the rest zeroed.
 */
int
xfs_btree_has_records(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	const union xfs_btree_key	*mask,
	enum xbtree_recpacking		*outcome)
{
	struct xfs_btree_has_records	info = {
		.outcome		= XBTREE_RECPACKING_EMPTY,
		.key_mask		= mask,
	};
	int				error;

	/* Not all btrees support this operation. */
	if (!cur->bc_ops->keys_contiguous) {
		ASSERT(0);
		return -EOPNOTSUPP;
	}

	xfs_btree_key_from_irec(cur, &info.start_key, low);
	xfs_btree_key_from_irec(cur, &info.end_key, high);

	error = xfs_btree_query_range(cur, low, high,
			xfs_btree_has_records_helper, &info);
	if (error == -ECANCELED)
		error = 0;
	if (error)
		return error;

	if (info.outcome == XBTREE_RECPACKING_EMPTY)
		goto out;

	/*
	 * If the largest high_key(rec) we saw during the walk is greater than
	 * the end of the search range, classify this as full. Otherwise,
	 * there is a hole at the end of the search range.
	 */
	if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
			mask))
		info.outcome = XBTREE_RECPACKING_FULL;

out:
	*outcome = info.outcome;
	return error;
}
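
/*
 * Mask example (a hedged sketch mirroring the rmapbt convention described
 * above): a caller that only wants to compare the startblock portion of an
 * rmapbt key can pass a mask with just that field set to all ones:
 *
 *	union xfs_btree_key	mask = {
 *		.rmap.rm_startblock	= cpu_to_be32(~0),
 *	};
 *
 * Passing a NULL @mask instead compares every key field.
 */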

/* Are there more records in this btree? */
bool
xfs_btree_has_more_records(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, 0, &bp);

	/* There are still records in this block. */
	if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block))
		return true;

	/* There are more record blocks. */
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
	else
		return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
}

/* Set up all the btree cursor caches. */
int __init
xfs_btree_init_cur_caches(void)
{
	int		error;

	error = xfs_allocbt_init_cur_cache();
	if (error)
		return error;
	error = xfs_inobt_init_cur_cache();
	if (error)
		goto err;
	error = xfs_bmbt_init_cur_cache();
	if (error)
		goto err;
	error = xfs_rmapbt_init_cur_cache();
	if (error)
		goto err;
	error = xfs_refcountbt_init_cur_cache();
	if (error)
		goto err;

	return 0;
err:
	xfs_btree_destroy_cur_caches();
	return error;
}

/* Destroy all the btree cursor caches, if they've been allocated. */
void
xfs_btree_destroy_cur_caches(void)
{
	xfs_allocbt_destroy_cur_cache();
	xfs_inobt_destroy_cur_cache();
	xfs_bmbt_destroy_cur_cache();
	xfs_rmapbt_destroy_cur_cache();
	xfs_refcountbt_destroy_cur_cache();
}

/* Move the btree cursor before the first record. */
int
xfs_btree_goto_left_edge(
	struct xfs_btree_cur	*cur)
{
	int			stat = 0;
	int			error;

	memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat)
		return 0;

	error = xfs_btree_decrement(cur, 0, &stat);
	if (error)
		return error;
	if (stat != 0) {
		ASSERT(0);
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	return 0;
}