1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
17 #include "xfs_dir2_priv.h"
18 #include "xfs_trans.h"
20 #include "xfs_attr_leaf.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_buf_item.h"
25 #include "xfs_errortag.h"
30 * Routines to implement directories as Btrees of hashed names.
33 /*========================================================================
34 * Function prototypes for the kernel.
35 *========================================================================*/
38 * Routines used for growing the Btree.
40 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
41 xfs_da_state_blk_t *existing_root,
42 xfs_da_state_blk_t *new_child);
43 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
44 xfs_da_state_blk_t *existing_blk,
45 xfs_da_state_blk_t *split_blk,
46 xfs_da_state_blk_t *blk_to_add,
49 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
50 xfs_da_state_blk_t *node_blk_1,
51 xfs_da_state_blk_t *node_blk_2);
52 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
53 xfs_da_state_blk_t *old_node_blk,
54 xfs_da_state_blk_t *new_node_blk);
57 * Routines used for shrinking the Btree.
59 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
60 xfs_da_state_blk_t *root_blk);
61 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
62 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
63 xfs_da_state_blk_t *drop_blk);
64 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
65 xfs_da_state_blk_t *src_node_blk,
66 xfs_da_state_blk_t *dst_node_blk);
71 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
72 xfs_da_state_blk_t *drop_blk,
73 xfs_da_state_blk_t *save_blk);
76 struct kmem_cache *xfs_da_state_cache; /* anchor for dir/attr state */
79 * Allocate a dir-state structure.
80 * We don't put them on the stack since they're large.
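 *
 * Typical usage is a sketch like the following (an illustration, not code
 * taken from this file): callers pair the allocation with
 * xfs_da_state_free(),
 *
 *	state = xfs_da_state_alloc(args);
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);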
84 struct xfs_da_args *args)
86 struct xfs_da_state *state;
88 state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
90 state->mp = args->dp->i_mount;
95 * Kill the altpath contents of a da-state structure.
98 xfs_da_state_kill_altpath(xfs_da_state_t *state)
102 for (i = 0; i < state->altpath.active; i++)
103 state->altpath.blk[i].bp = NULL;
104 state->altpath.active = 0;
108 * Free a da-state structure.
111 xfs_da_state_free(xfs_da_state_t *state)
113 xfs_da_state_kill_altpath(state);
115 memset((char *)state, 0, sizeof(*state));
117 kmem_cache_free(xfs_da_state_cache, state);
122 struct xfs_da_state *state,
123 struct xfs_da_args *args)
125 xfs_da_state_kill_altpath(state);
126 memset(state, 0, sizeof(struct xfs_da_state));
128 state->mp = state->args->dp->i_mount;
131 static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
133 if (whichfork == XFS_DATA_FORK)
134 return mp->m_dir_geo->fsbcount;
135 return mp->m_attr_geo->fsbcount;
139 xfs_da3_node_hdr_from_disk(
140 struct xfs_mount *mp,
141 struct xfs_da3_icnode_hdr *to,
142 struct xfs_da_intnode *from)
144 if (xfs_has_crc(mp)) {
145 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
147 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
148 to->back = be32_to_cpu(from3->hdr.info.hdr.back);
149 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
150 to->count = be16_to_cpu(from3->hdr.__count);
151 to->level = be16_to_cpu(from3->hdr.__level);
152 to->btree = from3->__btree;
153 ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
155 to->forw = be32_to_cpu(from->hdr.info.forw);
156 to->back = be32_to_cpu(from->hdr.info.back);
157 to->magic = be16_to_cpu(from->hdr.info.magic);
158 to->count = be16_to_cpu(from->hdr.__count);
159 to->level = be16_to_cpu(from->hdr.__level);
160 to->btree = from->__btree;
161 ASSERT(to->magic == XFS_DA_NODE_MAGIC);
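/*
 * A sketch of the usual round trip between the two conversion helpers (an
 * illustration, not from the original): pull the on-disk header into the
 * CPU-endian xfs_da3_icnode_hdr with xfs_da3_node_hdr_from_disk(), update
 * fields such as count or level, then push the result back with
 * xfs_da3_node_hdr_to_disk() and log the header range of the buffer, e.g.
 *
 *	xfs_da3_node_hdr_from_disk(mp, &ichdr, node);
 *	ichdr.count++;
 *	xfs_da3_node_hdr_to_disk(mp, node, &ichdr);
 *	xfs_trans_log_buf(tp, bp, XFS_DA_LOGRANGE(node, &node->hdr,
 *			  args->geo->node_hdr_size));
 */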
166 xfs_da3_node_hdr_to_disk(
167 struct xfs_mount *mp,
168 struct xfs_da_intnode *to,
169 struct xfs_da3_icnode_hdr *from)
171 if (xfs_has_crc(mp)) {
172 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
174 ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
175 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
176 to3->hdr.info.hdr.back = cpu_to_be32(from->back);
177 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
178 to3->hdr.__count = cpu_to_be16(from->count);
179 to3->hdr.__level = cpu_to_be16(from->level);
181 ASSERT(from->magic == XFS_DA_NODE_MAGIC);
182 to->hdr.info.forw = cpu_to_be32(from->forw);
183 to->hdr.info.back = cpu_to_be32(from->back);
184 to->hdr.info.magic = cpu_to_be16(from->magic);
185 to->hdr.__count = cpu_to_be16(from->count);
186 to->hdr.__level = cpu_to_be16(from->level);
191 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
192 * accessible on v5 filesystems. This header format is common across da node,
193 * attr leaf and dir leaf blocks.
196 xfs_da3_blkinfo_verify(
198 struct xfs_da3_blkinfo *hdr3)
200 struct xfs_mount *mp = bp->b_mount;
201 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
203 if (!xfs_verify_magic16(bp, hdr->magic))
204 return __this_address;
206 if (xfs_has_crc(mp)) {
207 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
208 return __this_address;
209 if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
210 return __this_address;
211 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
212 return __this_address;
218 static xfs_failaddr_t
222 struct xfs_mount *mp = bp->b_mount;
223 struct xfs_da_intnode *hdr = bp->b_addr;
224 struct xfs_da3_icnode_hdr ichdr;
227 xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
229 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
233 if (ichdr.level == 0)
234 return __this_address;
235 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
236 return __this_address;
237 if (ichdr.count == 0)
238 return __this_address;
241 * we don't know if the node is for an attribute or directory tree,
242 * so only fail if the count is outside both bounds
244 if (ichdr.count > mp->m_dir_geo->node_ents &&
245 ichdr.count > mp->m_attr_geo->node_ents)
246 return __this_address;
248 /* XXX: hash order check? */
254 xfs_da3_node_write_verify(
257 struct xfs_mount *mp = bp->b_mount;
258 struct xfs_buf_log_item *bip = bp->b_log_item;
259 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
262 fa = xfs_da3_node_verify(bp);
264 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
268 if (!xfs_has_crc(mp))
272 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
274 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
278 * leaf/node format detection on trees is sketchy, so a node read can be done on
279 * leaf level blocks when detection identifies the tree as a node format tree
280 * incorrectly. In this case, we need to swap the verifier to match the correct
281 * format of the block being read.
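 *
 * Illustration (not from the original comment): a block that is really a
 * dir2 LEAFN block may be read through this verifier; the magic switch
 * below notices that and points b_ops at xfs_dir3_leafn_buf_ops before
 * re-running the read verifier, so the block is still checked by the
 * correct code.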
284 xfs_da3_node_read_verify(
287 struct xfs_da_blkinfo *info = bp->b_addr;
290 switch (be16_to_cpu(info->magic)) {
291 case XFS_DA3_NODE_MAGIC:
292 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
293 xfs_verifier_error(bp, -EFSBADCRC,
298 case XFS_DA_NODE_MAGIC:
299 fa = xfs_da3_node_verify(bp);
301 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
303 case XFS_ATTR_LEAF_MAGIC:
304 case XFS_ATTR3_LEAF_MAGIC:
305 bp->b_ops = &xfs_attr3_leaf_buf_ops;
306 bp->b_ops->verify_read(bp);
308 case XFS_DIR2_LEAFN_MAGIC:
309 case XFS_DIR3_LEAFN_MAGIC:
310 bp->b_ops = &xfs_dir3_leafn_buf_ops;
311 bp->b_ops->verify_read(bp);
314 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
319 /* Verify the structure of a da3 block. */
320 static xfs_failaddr_t
321 xfs_da3_node_verify_struct(
324 struct xfs_da_blkinfo *info = bp->b_addr;
326 switch (be16_to_cpu(info->magic)) {
327 case XFS_DA3_NODE_MAGIC:
328 case XFS_DA_NODE_MAGIC:
329 return xfs_da3_node_verify(bp);
330 case XFS_ATTR_LEAF_MAGIC:
331 case XFS_ATTR3_LEAF_MAGIC:
332 bp->b_ops = &xfs_attr3_leaf_buf_ops;
333 return bp->b_ops->verify_struct(bp);
334 case XFS_DIR2_LEAFN_MAGIC:
335 case XFS_DIR3_LEAFN_MAGIC:
336 bp->b_ops = &xfs_dir3_leafn_buf_ops;
337 return bp->b_ops->verify_struct(bp);
339 return __this_address;
343 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
344 .name = "xfs_da3_node",
345 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
346 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
347 .verify_read = xfs_da3_node_read_verify,
348 .verify_write = xfs_da3_node_write_verify,
349 .verify_struct = xfs_da3_node_verify_struct,
353 xfs_da3_node_set_type(
354 struct xfs_trans *tp,
357 struct xfs_da_blkinfo *info = bp->b_addr;
359 switch (be16_to_cpu(info->magic)) {
360 case XFS_DA_NODE_MAGIC:
361 case XFS_DA3_NODE_MAGIC:
362 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
364 case XFS_ATTR_LEAF_MAGIC:
365 case XFS_ATTR3_LEAF_MAGIC:
366 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
368 case XFS_DIR2_LEAFN_MAGIC:
369 case XFS_DIR3_LEAFN_MAGIC:
370 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
373 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
374 info, sizeof(*info));
375 xfs_trans_brelse(tp, bp);
376 return -EFSCORRUPTED;
382 struct xfs_trans *tp,
383 struct xfs_inode *dp,
385 struct xfs_buf **bpp,
390 error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
391 &xfs_da3_node_buf_ops);
392 if (error || !*bpp || !tp)
394 return xfs_da3_node_set_type(tp, *bpp);
398 xfs_da3_node_read_mapped(
399 struct xfs_trans *tp,
400 struct xfs_inode *dp,
401 xfs_daddr_t mappedbno,
402 struct xfs_buf **bpp,
405 struct xfs_mount *mp = dp->i_mount;
408 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
409 XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
410 bpp, &xfs_da3_node_buf_ops);
414 if (whichfork == XFS_ATTR_FORK)
415 xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
417 xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
421 return xfs_da3_node_set_type(tp, *bpp);
425 * Copy src directory/attr leaf/node buffer to the dst.
426 * For v5 file systems make sure the right blkno is stamped in.
434 struct xfs_da3_blkinfo *da3 = dst->b_addr;
436 memcpy(dst->b_addr, src->b_addr, size);
437 dst->b_ops = src->b_ops;
438 xfs_trans_buf_copy_type(dst, src);
439 if (xfs_has_crc(dst->b_mount))
440 da3->blkno = cpu_to_be64(xfs_buf_daddr(dst));
443 /*========================================================================
444 * Routines used for growing the Btree.
445 *========================================================================*/
448 * Create the initial contents of an intermediate node.
452 struct xfs_da_args *args,
455 struct xfs_buf **bpp,
458 struct xfs_da_intnode *node;
459 struct xfs_trans *tp = args->trans;
460 struct xfs_mount *mp = tp->t_mountp;
461 struct xfs_da3_icnode_hdr ichdr = {0};
464 struct xfs_inode *dp = args->dp;
466 trace_xfs_da_node_create(args);
467 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
469 error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
472 bp->b_ops = &xfs_da3_node_buf_ops;
473 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
476 if (xfs_has_crc(mp)) {
477 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
479 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
480 ichdr.magic = XFS_DA3_NODE_MAGIC;
481 hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
482 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
483 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
485 ichdr.magic = XFS_DA_NODE_MAGIC;
489 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
490 xfs_trans_log_buf(tp, bp,
491 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
498 * Split a leaf node, rebalance, then possibly split
499 * intermediate nodes, rebalance, etc.
503 struct xfs_da_state *state)
505 struct xfs_da_state_blk *oldblk;
506 struct xfs_da_state_blk *newblk;
507 struct xfs_da_state_blk *addblk;
508 struct xfs_da_intnode *node;
514 trace_xfs_da_split(state->args);
516 if (XFS_TEST_ERROR(false, state->mp, XFS_ERRTAG_DA_LEAF_SPLIT))
520 * Walk back up the tree splitting/inserting/adjusting as necessary.
521 * If we need to insert and there isn't room, split the node, then
522 * decide which fragment to insert the new block from below into.
523 * Note that we may split the root this way, but we need more fixup.
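 *
 * As an illustration (not part of the original comment): adding one entry
 * to a full directory leaf splits that leaf, which needs a new entry in its
 * parent da node; if that node is full it splits as well, and when the
 * block being split is the root, xfs_da3_root_split() below adds a new
 * level to the tree.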
525 max = state->path.active - 1;
526 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
527 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
528 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
530 addblk = &state->path.blk[max]; /* initial dummy value */
531 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
532 oldblk = &state->path.blk[i];
533 newblk = &state->altpath.blk[i];
536 * If a leaf node then
537 * Allocate a new leaf node, then rebalance across them.
538 * else if an intermediate node then
539 * We split on the last layer, must we split the node?
541 switch (oldblk->magic) {
542 case XFS_ATTR_LEAF_MAGIC:
543 error = xfs_attr3_leaf_split(state, oldblk, newblk);
544 if ((error != 0) && (error != -ENOSPC)) {
545 return error; /* GROT: attr is inconsistent */
552 * Entry wouldn't fit, split the leaf again. The new
553 * extrablk will be consumed by xfs_da3_node_split if
556 state->extravalid = 1;
558 state->extraafter = 0; /* before newblk */
559 trace_xfs_attr_leaf_split_before(state->args);
560 error = xfs_attr3_leaf_split(state, oldblk,
563 state->extraafter = 1; /* after newblk */
564 trace_xfs_attr_leaf_split_after(state->args);
565 error = xfs_attr3_leaf_split(state, newblk,
569 return error; /* GROT: attr inconsistent */
572 case XFS_DIR2_LEAFN_MAGIC:
573 error = xfs_dir2_leafn_split(state, oldblk, newblk);
578 case XFS_DA_NODE_MAGIC:
579 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
583 return error; /* GROT: dir is inconsistent */
585 * Record the newly split block for the next time thru?
595 * Update the btree to show the new hashval for this child.
597 xfs_da3_fixhashpath(state, &state->path);
603 * xfs_da3_node_split() should have consumed any extra blocks we added
604 * during a double leaf split in the attr fork. This is guaranteed as
605 * we can't be here if the attr fork only has a single leaf block.
607 ASSERT(state->extravalid == 0 ||
608 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
611 * Split the root node.
613 ASSERT(state->path.active == 0);
614 oldblk = &state->path.blk[0];
615 error = xfs_da3_root_split(state, oldblk, addblk);
620 * Update pointers to the node which used to be block 0 and just got
621 * bumped because of the addition of a new root node. Note that the
622 * original block 0 could be at any position in the list of blocks in
625 * Note: the magic numbers and sibling pointers are in the same physical
626 * place for both v2 and v3 headers (by design). Hence it doesn't matter
627 * which version of the xfs_da_intnode structure we use here as the
628 * result will be the same using either structure.
630 node = oldblk->bp->b_addr;
631 if (node->hdr.info.forw) {
632 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
633 xfs_buf_mark_corrupt(oldblk->bp);
634 error = -EFSCORRUPTED;
637 node = addblk->bp->b_addr;
638 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
639 xfs_trans_log_buf(state->args->trans, addblk->bp,
640 XFS_DA_LOGRANGE(node, &node->hdr.info,
641 sizeof(node->hdr.info)));
643 node = oldblk->bp->b_addr;
644 if (node->hdr.info.back) {
645 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
646 xfs_buf_mark_corrupt(oldblk->bp);
647 error = -EFSCORRUPTED;
650 node = addblk->bp->b_addr;
651 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
652 xfs_trans_log_buf(state->args->trans, addblk->bp,
653 XFS_DA_LOGRANGE(node, &node->hdr.info,
654 sizeof(node->hdr.info)));
662 * Split the root. We have to create a new root and point to the two
663 * parts (the split old root) that we just created. Copy block zero to
664 * the EOF, extending the inode in the process.
666 STATIC int /* error */
668 struct xfs_da_state *state,
669 struct xfs_da_state_blk *blk1,
670 struct xfs_da_state_blk *blk2)
672 struct xfs_da_intnode *node;
673 struct xfs_da_intnode *oldroot;
674 struct xfs_da_node_entry *btree;
675 struct xfs_da3_icnode_hdr nodehdr;
676 struct xfs_da_args *args;
678 struct xfs_inode *dp;
679 struct xfs_trans *tp;
680 struct xfs_dir2_leaf *leaf;
686 trace_xfs_da_root_split(state->args);
689 * Copy the existing (incorrect) block from the root node position
690 * to a free space somewhere.
693 error = xfs_da_grow_inode(args, &blkno);
699 error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
703 oldroot = blk1->bp->b_addr;
704 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
705 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
706 struct xfs_da3_icnode_hdr icnodehdr;
708 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
709 btree = icnodehdr.btree;
710 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
711 level = icnodehdr.level;
713 struct xfs_dir3_icleaf_hdr leafhdr;
715 leaf = (xfs_dir2_leaf_t *)oldroot;
716 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
718 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
719 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
720 size = (int)((char *)&leafhdr.ents[leafhdr.count] -
726 * Copy old root to new buffer and log it.
728 xfs_da_buf_copy(bp, blk1->bp, size);
729 xfs_trans_log_buf(tp, bp, 0, size - 1);
732 * Update blk1 to point to new buffer.
738 * Set up the new root node.
740 error = xfs_da3_node_create(args,
741 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
742 level + 1, &bp, args->whichfork);
747 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
748 btree = nodehdr.btree;
749 btree[0].hashval = cpu_to_be32(blk1->hashval);
750 btree[0].before = cpu_to_be32(blk1->blkno);
751 btree[1].hashval = cpu_to_be32(blk2->hashval);
752 btree[1].before = cpu_to_be32(blk2->blkno);
754 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
757 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
758 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
759 ASSERT(blk1->blkno >= args->geo->leafblk &&
760 blk1->blkno < args->geo->freeblk);
761 ASSERT(blk2->blkno >= args->geo->leafblk &&
762 blk2->blkno < args->geo->freeblk);
766 /* Header is already logged by xfs_da_node_create */
767 xfs_trans_log_buf(tp, bp,
768 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
774 * Split the node, rebalance, then add the new entry.
776 STATIC int /* error */
778 struct xfs_da_state *state,
779 struct xfs_da_state_blk *oldblk,
780 struct xfs_da_state_blk *newblk,
781 struct xfs_da_state_blk *addblk,
785 struct xfs_da_intnode *node;
786 struct xfs_da3_icnode_hdr nodehdr;
791 struct xfs_inode *dp = state->args->dp;
793 trace_xfs_da_node_split(state->args);
795 node = oldblk->bp->b_addr;
796 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
799 * With V2 dirs the extra block is data or freespace.
801 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
802 newcount = 1 + useextra;
804 * Do we have to split the node?
806 if (nodehdr.count + newcount > state->args->geo->node_ents) {
808 * Allocate a new node, add to the doubly linked chain of
809 * nodes, then move some of our excess entries into it.
811 error = xfs_da_grow_inode(state->args, &blkno);
813 return error; /* GROT: dir is inconsistent */
815 error = xfs_da3_node_create(state->args, blkno, treelevel,
816 &newblk->bp, state->args->whichfork);
818 return error; /* GROT: dir is inconsistent */
819 newblk->blkno = blkno;
820 newblk->magic = XFS_DA_NODE_MAGIC;
821 xfs_da3_node_rebalance(state, oldblk, newblk);
822 error = xfs_da3_blk_link(state, oldblk, newblk);
831 * Insert the new entry(s) into the correct block
832 * (updating last hashval in the process).
834 * xfs_da3_node_add() inserts BEFORE the given index,
835 * and as a result of using node_lookup_int() we always
836 * point to a valid entry (not after one), but a split
837 * operation always results in a new block whose hashvals
838 * FOLLOW the current block.
840 * If we had double-split op below us, then add the extra block too.
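 *
 * Illustration (a sketch, not from the original): xfs_da3_node_rebalance()
 * marks oldblk->index invalid (count + 1) when the insertion point moved
 * into the new block, so the test below simply adds the entry to whichever
 * of oldblk/newblk now covers the insertion index.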
842 node = oldblk->bp->b_addr;
843 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
844 if (oldblk->index <= nodehdr.count) {
846 xfs_da3_node_add(state, oldblk, addblk);
848 if (state->extraafter)
850 xfs_da3_node_add(state, oldblk, &state->extrablk);
851 state->extravalid = 0;
855 xfs_da3_node_add(state, newblk, addblk);
857 if (state->extraafter)
859 xfs_da3_node_add(state, newblk, &state->extrablk);
860 state->extravalid = 0;
868 * Balance the btree elements between two intermediate nodes,
869 * usually one full and one empty.
871 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
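 *
 * Worked example (an illustration): with 96 live entries in blk1 and none
 * in blk2, count below works out to (96 - 0) / 2 = 48, so the 48
 * highest-hashval entries of node1 are copied to the front of node2.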
874 xfs_da3_node_rebalance(
875 struct xfs_da_state *state,
876 struct xfs_da_state_blk *blk1,
877 struct xfs_da_state_blk *blk2)
879 struct xfs_da_intnode *node1;
880 struct xfs_da_intnode *node2;
881 struct xfs_da_node_entry *btree1;
882 struct xfs_da_node_entry *btree2;
883 struct xfs_da_node_entry *btree_s;
884 struct xfs_da_node_entry *btree_d;
885 struct xfs_da3_icnode_hdr nodehdr1;
886 struct xfs_da3_icnode_hdr nodehdr2;
887 struct xfs_trans *tp;
891 struct xfs_inode *dp = state->args->dp;
893 trace_xfs_da_node_rebalance(state->args);
895 node1 = blk1->bp->b_addr;
896 node2 = blk2->bp->b_addr;
897 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
898 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
899 btree1 = nodehdr1.btree;
900 btree2 = nodehdr2.btree;
903 * Figure out how many entries need to move, and in which direction.
904 * Swap the nodes around if that makes it simpler.
906 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
907 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
908 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
909 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
911 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
912 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
913 btree1 = nodehdr1.btree;
914 btree2 = nodehdr2.btree;
918 count = (nodehdr1.count - nodehdr2.count) / 2;
921 tp = state->args->trans;
923 * Two cases: high-to-low and low-to-high.
927 * Move elements in node2 up to make a hole.
929 tmp = nodehdr2.count;
931 tmp *= (uint)sizeof(xfs_da_node_entry_t);
932 btree_s = &btree2[0];
933 btree_d = &btree2[count];
934 memmove(btree_d, btree_s, tmp);
938 * Move the req'd B-tree elements from high in node1 to
941 nodehdr2.count += count;
942 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
943 btree_s = &btree1[nodehdr1.count - count];
944 btree_d = &btree2[0];
945 memcpy(btree_d, btree_s, tmp);
946 nodehdr1.count -= count;
949 * Move the req'd B-tree elements from low in node2 to
953 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
954 btree_s = &btree2[0];
955 btree_d = &btree1[nodehdr1.count];
956 memcpy(btree_d, btree_s, tmp);
957 nodehdr1.count += count;
959 xfs_trans_log_buf(tp, blk1->bp,
960 XFS_DA_LOGRANGE(node1, btree_d, tmp));
963 * Move elements in node2 down to fill the hole.
965 tmp = nodehdr2.count - count;
966 tmp *= (uint)sizeof(xfs_da_node_entry_t);
967 btree_s = &btree2[count];
968 btree_d = &btree2[0];
969 memmove(btree_d, btree_s, tmp);
970 nodehdr2.count -= count;
974 * Log header of node 1 and all current bits of node 2.
976 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
977 xfs_trans_log_buf(tp, blk1->bp,
978 XFS_DA_LOGRANGE(node1, &node1->hdr,
979 state->args->geo->node_hdr_size));
981 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
982 xfs_trans_log_buf(tp, blk2->bp,
983 XFS_DA_LOGRANGE(node2, &node2->hdr,
984 state->args->geo->node_hdr_size +
985 (sizeof(btree2[0]) * nodehdr2.count)));
988 * Record the last hashval from each block for upward propagation.
989 * (note: don't use the swapped node pointers)
992 node1 = blk1->bp->b_addr;
993 node2 = blk2->bp->b_addr;
994 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
995 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
996 btree1 = nodehdr1.btree;
997 btree2 = nodehdr2.btree;
999 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
1000 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
1003 * Adjust the expected index for insertion.
1005 if (blk1->index >= nodehdr1.count) {
1006 blk2->index = blk1->index - nodehdr1.count;
1007 blk1->index = nodehdr1.count + 1; /* make it invalid */
1012 * Add a new entry to an intermediate node.
1016 struct xfs_da_state *state,
1017 struct xfs_da_state_blk *oldblk,
1018 struct xfs_da_state_blk *newblk)
1020 struct xfs_da_intnode *node;
1021 struct xfs_da3_icnode_hdr nodehdr;
1022 struct xfs_da_node_entry *btree;
1024 struct xfs_inode *dp = state->args->dp;
1026 trace_xfs_da_node_add(state->args);
1028 node = oldblk->bp->b_addr;
1029 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1030 btree = nodehdr.btree;
1032 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
1033 ASSERT(newblk->blkno != 0);
1034 if (state->args->whichfork == XFS_DATA_FORK)
1035 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
1036 newblk->blkno < state->args->geo->freeblk);
1039 * We may need to make some room before we insert the new node.
1042 if (oldblk->index < nodehdr.count) {
1043 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1044 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1046 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1047 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1048 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1049 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1050 tmp + sizeof(*btree)));
1053 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1054 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1055 XFS_DA_LOGRANGE(node, &node->hdr,
1056 state->args->geo->node_hdr_size));
1059 * Copy the last hash value from the oldblk to propagate upwards.
1061 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1064 /*========================================================================
1065 * Routines used for shrinking the Btree.
1066 *========================================================================*/
1069 * Deallocate an empty leaf node, remove it from its parent,
1070 * possibly deallocating that block, etc...
1074 struct xfs_da_state *state)
1076 struct xfs_da_state_blk *drop_blk;
1077 struct xfs_da_state_blk *save_blk;
1081 trace_xfs_da_join(state->args);
1083 drop_blk = &state->path.blk[ state->path.active-1 ];
1084 save_blk = &state->altpath.blk[ state->path.active-1 ];
1085 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
1086 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
1087 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1090 * Walk back up the tree joining/deallocating as necessary.
1091 * When we stop dropping blocks, break out.
1093 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1094 state->path.active--) {
1096 * See if we can combine the block with a neighbor.
1097 * (action == 0) => no options, just leave
1098 * (action == 1) => coalesce, then unlink
1099 * (action == 2) => block empty, unlink it
1101 switch (drop_blk->magic) {
1102 case XFS_ATTR_LEAF_MAGIC:
1103 error = xfs_attr3_leaf_toosmall(state, &action);
1108 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1110 case XFS_DIR2_LEAFN_MAGIC:
1111 error = xfs_dir2_leafn_toosmall(state, &action);
1116 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1118 case XFS_DA_NODE_MAGIC:
1120 * Remove the offending node, fixup hashvals,
1121 * check for a toosmall neighbor.
1123 xfs_da3_node_remove(state, drop_blk);
1124 xfs_da3_fixhashpath(state, &state->path);
1125 error = xfs_da3_node_toosmall(state, &action);
1130 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1133 xfs_da3_fixhashpath(state, &state->altpath);
1134 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1135 xfs_da_state_kill_altpath(state);
1138 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1140 drop_blk->bp = NULL;
1145 * We joined all the way to the top. If it turns out that
1146 * we only have one entry in the root, make the child block
1149 xfs_da3_node_remove(state, drop_blk);
1150 xfs_da3_fixhashpath(state, &state->path);
1151 error = xfs_da3_root_join(state, &state->path.blk[0]);
1157 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1159 __be16 magic = blkinfo->magic;
1162 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1163 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1164 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1165 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1167 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1168 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1170 ASSERT(!blkinfo->forw);
1171 ASSERT(!blkinfo->back);
1174 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1178 * We have only one entry in the root. Copy the only remaining child of
1179 * the old root to block 0 as the new root node.
1183 struct xfs_da_state *state,
1184 struct xfs_da_state_blk *root_blk)
1186 struct xfs_da_intnode *oldroot;
1187 struct xfs_da_args *args;
1190 struct xfs_da3_icnode_hdr oldroothdr;
1192 struct xfs_inode *dp = state->args->dp;
1194 trace_xfs_da_root_join(state->args);
1196 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1199 oldroot = root_blk->bp->b_addr;
1200 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
1201 ASSERT(oldroothdr.forw == 0);
1202 ASSERT(oldroothdr.back == 0);
1205 * If the root has more than one child, then don't do anything.
1207 if (oldroothdr.count > 1)
1211 * Read in the (only) child block, then copy those bytes into
1212 * the root block's buffer and free the original child block.
1214 child = be32_to_cpu(oldroothdr.btree[0].before);
1216 error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
1219 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1222 * Copy child to root buffer and log it.
1224 xfs_da_buf_copy(root_blk->bp, bp, args->geo->blksize);
1225 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1226 args->geo->blksize - 1);
1228 * Now we can drop the child buffer.
1230 error = xfs_da_shrink_inode(args, child, bp);
1235 * Check a node block and its neighbors to see if the block should be
1236 * collapsed into one or the other neighbor. Always keep the block
1237 * with the smaller block number.
1238 * If the current block is over 50% full, don't try to join it, return 0.
1239 * If the block is empty, fill in the state structure and return 2.
1240 * If it can be collapsed, fill in the state structure and return 1.
1241 * If nothing can be done, return 0.
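 *
 * Example of the "25% to spare" test below (a sketch): with node_ents = 64
 * the threshold works out to 64 - 16 - nodehdr.count, so a sibling is only
 * absorbed if its entry count fits in that remaining space, i.e. the merged
 * node stays at most 75% full.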
1244 xfs_da3_node_toosmall(
1245 struct xfs_da_state *state,
1248 struct xfs_da_intnode *node;
1249 struct xfs_da_state_blk *blk;
1250 struct xfs_da_blkinfo *info;
1253 struct xfs_da3_icnode_hdr nodehdr;
1259 struct xfs_inode *dp = state->args->dp;
1261 trace_xfs_da_node_toosmall(state->args);
1264 * Check for the degenerate case of the block being over 50% full.
1265 * If so, it's not worth even looking to see if we might be able
1266 * to coalesce with a sibling.
1268 blk = &state->path.blk[ state->path.active-1 ];
1269 info = blk->bp->b_addr;
1270 node = (xfs_da_intnode_t *)info;
1271 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1272 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1273 *action = 0; /* blk over 50%, don't try to join */
1274 return 0; /* blk over 50%, don't try to join */
1278 * Check for the degenerate case of the block being empty.
1279 * If the block is empty, we'll simply delete it, no need to
1280 * coalesce it with a sibling block. We choose (arbitrarily)
1281 * to merge with the forward block unless it is NULL.
1283 if (nodehdr.count == 0) {
1285 * Make altpath point to the block we want to keep and
1286 * path point to the block we want to drop (this one).
1288 forward = (info->forw != 0);
1289 memcpy(&state->altpath, &state->path, sizeof(state->path));
1290 error = xfs_da3_path_shift(state, &state->altpath, forward,
1303 * Examine each sibling block to see if we can coalesce with
1304 * at least 25% free space to spare. We need to figure out
1305 * whether to merge with the forward or the backward block.
1306 * We prefer coalescing with the lower numbered sibling so as
1307 * to shrink a directory over time.
1309 count = state->args->geo->node_ents;
1310 count -= state->args->geo->node_ents >> 2;
1311 count -= nodehdr.count;
1313 /* start with smaller blk num */
1314 forward = nodehdr.forw < nodehdr.back;
1315 for (i = 0; i < 2; forward = !forward, i++) {
1316 struct xfs_da3_icnode_hdr thdr;
1318 blkno = nodehdr.forw;
1320 blkno = nodehdr.back;
1323 error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
1324 state->args->whichfork);
1329 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
1330 xfs_trans_brelse(state->args->trans, bp);
1332 if (count - thdr.count >= 0)
1333 break; /* fits with at least 25% to spare */
1341 * Make altpath point to the block we want to keep (the lower
1342 * numbered block) and path point to the block we want to drop.
1344 memcpy(&state->altpath, &state->path, sizeof(state->path));
1345 if (blkno < blk->blkno) {
1346 error = xfs_da3_path_shift(state, &state->altpath, forward,
1349 error = xfs_da3_path_shift(state, &state->path, forward,
1363 * Pick up the last hashvalue from an intermediate node.
1366 xfs_da3_node_lasthash(
1367 struct xfs_inode *dp,
1371 struct xfs_da3_icnode_hdr nodehdr;
1373 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
1375 *count = nodehdr.count;
1378 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1382 * Walk back up the tree adjusting hash values as necessary;
1383 * when we stop making changes, return.
1386 xfs_da3_fixhashpath(
1387 struct xfs_da_state *state,
1388 struct xfs_da_state_path *path)
1390 struct xfs_da_state_blk *blk;
1391 struct xfs_da_intnode *node;
1392 struct xfs_da_node_entry *btree;
1393 xfs_dahash_t lasthash=0;
1396 struct xfs_inode *dp = state->args->dp;
1398 trace_xfs_da_fixhashpath(state->args);
1400 level = path->active-1;
1401 blk = &path->blk[ level ];
1402 switch (blk->magic) {
1403 case XFS_ATTR_LEAF_MAGIC:
1404 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1408 case XFS_DIR2_LEAFN_MAGIC:
1409 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1413 case XFS_DA_NODE_MAGIC:
1414 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1419 for (blk--, level--; level >= 0; blk--, level--) {
1420 struct xfs_da3_icnode_hdr nodehdr;
1422 node = blk->bp->b_addr;
1423 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1424 btree = nodehdr.btree;
1425 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1427 blk->hashval = lasthash;
1428 btree[blk->index].hashval = cpu_to_be32(lasthash);
1429 xfs_trans_log_buf(state->args->trans, blk->bp,
1430 XFS_DA_LOGRANGE(node, &btree[blk->index],
1433 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1438 * Remove an entry from an intermediate node.
1441 xfs_da3_node_remove(
1442 struct xfs_da_state *state,
1443 struct xfs_da_state_blk *drop_blk)
1445 struct xfs_da_intnode *node;
1446 struct xfs_da3_icnode_hdr nodehdr;
1447 struct xfs_da_node_entry *btree;
1450 struct xfs_inode *dp = state->args->dp;
1452 trace_xfs_da_node_remove(state->args);
1454 node = drop_blk->bp->b_addr;
1455 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1456 ASSERT(drop_blk->index < nodehdr.count);
1457 ASSERT(drop_blk->index >= 0);
1460 * Copy over the offending entry, or just zero it out.
1462 index = drop_blk->index;
1463 btree = nodehdr.btree;
1464 if (index < nodehdr.count - 1) {
1465 tmp = nodehdr.count - index - 1;
1466 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1467 memmove(&btree[index], &btree[index + 1], tmp);
1468 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1469 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1470 index = nodehdr.count - 1;
1472 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1473 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1474 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1476 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1477 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1478 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
1481 * Copy the last hash value from the block to propagate upwards.
1483 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1487 * Unbalance the elements between two intermediate nodes by
1488 * moving all Btree elements from one node into the other.
1491 xfs_da3_node_unbalance(
1492 struct xfs_da_state *state,
1493 struct xfs_da_state_blk *drop_blk,
1494 struct xfs_da_state_blk *save_blk)
1496 struct xfs_da_intnode *drop_node;
1497 struct xfs_da_intnode *save_node;
1498 struct xfs_da_node_entry *drop_btree;
1499 struct xfs_da_node_entry *save_btree;
1500 struct xfs_da3_icnode_hdr drop_hdr;
1501 struct xfs_da3_icnode_hdr save_hdr;
1502 struct xfs_trans *tp;
1505 struct xfs_inode *dp = state->args->dp;
1507 trace_xfs_da_node_unbalance(state->args);
1509 drop_node = drop_blk->bp->b_addr;
1510 save_node = save_blk->bp->b_addr;
1511 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
1512 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
1513 drop_btree = drop_hdr.btree;
1514 save_btree = save_hdr.btree;
1515 tp = state->args->trans;
1518 * If the dying block has lower hashvals, then move all the
1519 * elements in the remaining block up to make a hole.
1521 if ((be32_to_cpu(drop_btree[0].hashval) <
1522 be32_to_cpu(save_btree[0].hashval)) ||
1523 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1524 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1525 /* XXX: check this - is memmove dst correct? */
1526 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1527 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1530 xfs_trans_log_buf(tp, save_blk->bp,
1531 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1532 (save_hdr.count + drop_hdr.count) *
1533 sizeof(xfs_da_node_entry_t)));
1535 sindex = save_hdr.count;
1536 xfs_trans_log_buf(tp, save_blk->bp,
1537 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1538 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1542 * Move all the B-tree elements from drop_blk to save_blk.
1544 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1545 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1546 save_hdr.count += drop_hdr.count;
1548 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
1549 xfs_trans_log_buf(tp, save_blk->bp,
1550 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1551 state->args->geo->node_hdr_size));
1554 * Save the last hashval in the remaining block for upward propagation.
1556 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1559 /*========================================================================
1560 * Routines used for finding things in the Btree.
1561 *========================================================================*/
1564 * Walk down the Btree looking for a particular filename, filling
1565 * in the state structure as we go.
1567 * We will set the state structure to point to each of the elements
1568 * in each of the nodes where either the hashval is or should be.
1570 * We support duplicate hashval's so for each entry in the current
1571 * node that could contain the desired hashval, descend. This is a
1572 * pruned depth-first tree search.
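 *
 * Illustration (not from the original comment): two names that hash to the
 * same value may straddle a leaf boundary, which is why the lookup below
 * shifts to the next leaf and retries when a search misses but the leaf's
 * last hashval equals the search hashval.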
1575 xfs_da3_node_lookup_int(
1576 struct xfs_da_state *state,
1579 struct xfs_da_state_blk *blk;
1580 struct xfs_da_blkinfo *curr;
1581 struct xfs_da_intnode *node;
1582 struct xfs_da_node_entry *btree;
1583 struct xfs_da3_icnode_hdr nodehdr;
1584 struct xfs_da_args *args;
1586 xfs_dahash_t hashval;
1587 xfs_dahash_t btreehashval;
1593 unsigned int expected_level = 0;
1595 struct xfs_inode *dp = state->args->dp;
1600 * Descend thru the B-tree searching each level for the right
1601 * node to use, until the right hashval is found.
1603 blkno = args->geo->leafblk;
1604 for (blk = &state->path.blk[0], state->path.active = 1;
1605 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1606 blk++, state->path.active++) {
1608 * Read the next node down in the tree.
1611 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1612 &blk->bp, args->whichfork);
1615 state->path.active--;
1618 curr = blk->bp->b_addr;
1619 magic = be16_to_cpu(curr->magic);
1621 if (magic == XFS_ATTR_LEAF_MAGIC ||
1622 magic == XFS_ATTR3_LEAF_MAGIC) {
1623 blk->magic = XFS_ATTR_LEAF_MAGIC;
1624 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1628 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1629 magic == XFS_DIR3_LEAFN_MAGIC) {
1630 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1631 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1636 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
1637 xfs_buf_mark_corrupt(blk->bp);
1638 return -EFSCORRUPTED;
1641 blk->magic = XFS_DA_NODE_MAGIC;
1644 * Search an intermediate node for a match.
1646 node = blk->bp->b_addr;
1647 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1648 btree = nodehdr.btree;
1650 /* Tree taller than we can handle; bail out! */
1651 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
1652 xfs_buf_mark_corrupt(blk->bp);
1653 return -EFSCORRUPTED;
1656 /* Check the level from the root. */
1657 if (blkno == args->geo->leafblk)
1658 expected_level = nodehdr.level - 1;
1659 else if (expected_level != nodehdr.level) {
1660 xfs_buf_mark_corrupt(blk->bp);
1661 return -EFSCORRUPTED;
1665 max = nodehdr.count;
1666 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1669 * Binary search. (note: small blocks will skip loop)
1671 probe = span = max / 2;
1672 hashval = args->hashval;
1675 btreehashval = be32_to_cpu(btree[probe].hashval);
1676 if (btreehashval < hashval)
1678 else if (btreehashval > hashval)
1683 ASSERT((probe >= 0) && (probe < max));
1684 ASSERT((span <= 4) ||
1685 (be32_to_cpu(btree[probe].hashval) == hashval));
1688 * Since we may have duplicate hashval's, find the first
1689 * matching hashval in the node.
1692 be32_to_cpu(btree[probe].hashval) >= hashval) {
1695 while (probe < max &&
1696 be32_to_cpu(btree[probe].hashval) < hashval) {
1701 * Pick the right block to descend on.
1704 blk->index = max - 1;
1705 blkno = be32_to_cpu(btree[max - 1].before);
1708 blkno = be32_to_cpu(btree[probe].before);
1711 /* We can't point back to the root. */
1712 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
1713 return -EFSCORRUPTED;
1716 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
1717 return -EFSCORRUPTED;
1720 * A leaf block that ends in the hashval that we are interested in
1721 * (final hashval == search hashval) means that the next block may
1722 * contain more entries with the same hashval, shift upward to the
1723 * next leaf and keep searching.
1726 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1727 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1728 &blk->index, state);
1729 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1730 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1731 blk->index = args->index;
1732 args->blkno = blk->blkno;
1735 return -EFSCORRUPTED;
1737 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1738 (blk->hashval == args->hashval)) {
1739 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1745 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1746 /* path_shift() gives ENOENT */
1756 /*========================================================================
1758 *========================================================================*/
1761 * Compare two intermediate nodes for "order".
1765 struct xfs_inode *dp,
1766 struct xfs_buf *node1_bp,
1767 struct xfs_buf *node2_bp)
1769 struct xfs_da_intnode *node1;
1770 struct xfs_da_intnode *node2;
1771 struct xfs_da_node_entry *btree1;
1772 struct xfs_da_node_entry *btree2;
1773 struct xfs_da3_icnode_hdr node1hdr;
1774 struct xfs_da3_icnode_hdr node2hdr;
1776 node1 = node1_bp->b_addr;
1777 node2 = node2_bp->b_addr;
1778 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
1779 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
1780 btree1 = node1hdr.btree;
1781 btree2 = node2hdr.btree;
1783 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1784 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1785 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1786 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1793 * Link a new block into a doubly linked list of blocks (of whatever type).
1797 struct xfs_da_state *state,
1798 struct xfs_da_state_blk *old_blk,
1799 struct xfs_da_state_blk *new_blk)
1801 struct xfs_da_blkinfo *old_info;
1802 struct xfs_da_blkinfo *new_info;
1803 struct xfs_da_blkinfo *tmp_info;
1804 struct xfs_da_args *args;
1808 struct xfs_inode *dp = state->args->dp;
1811 * Set up environment.
1814 ASSERT(args != NULL);
1815 old_info = old_blk->bp->b_addr;
1816 new_info = new_blk->bp->b_addr;
1817 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1818 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1819 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1821 switch (old_blk->magic) {
1822 case XFS_ATTR_LEAF_MAGIC:
1823 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1825 case XFS_DIR2_LEAFN_MAGIC:
1826 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1828 case XFS_DA_NODE_MAGIC:
1829 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1834 * Link blocks in appropriate order.
1838 * Link new block in before existing block.
1840 trace_xfs_da_link_before(args);
1841 new_info->forw = cpu_to_be32(old_blk->blkno);
1842 new_info->back = old_info->back;
1843 if (old_info->back) {
1844 error = xfs_da3_node_read(args->trans, dp,
1845 be32_to_cpu(old_info->back),
1846 &bp, args->whichfork);
1850 tmp_info = bp->b_addr;
1851 ASSERT(tmp_info->magic == old_info->magic);
1852 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1853 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1854 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1856 old_info->back = cpu_to_be32(new_blk->blkno);
1859 * Link new block in after existing block.
1861 trace_xfs_da_link_after(args);
1862 new_info->forw = old_info->forw;
1863 new_info->back = cpu_to_be32(old_blk->blkno);
1864 if (old_info->forw) {
1865 error = xfs_da3_node_read(args->trans, dp,
1866 be32_to_cpu(old_info->forw),
1867 &bp, args->whichfork);
1871 tmp_info = bp->b_addr;
1872 ASSERT(tmp_info->magic == old_info->magic);
1873 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1874 tmp_info->back = cpu_to_be32(new_blk->blkno);
1875 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1877 old_info->forw = cpu_to_be32(new_blk->blkno);
1880 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1881 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1886 * Unlink a block from a doubly linked list of blocks.
1888 STATIC int /* error */
1890 struct xfs_da_state *state,
1891 struct xfs_da_state_blk *drop_blk,
1892 struct xfs_da_state_blk *save_blk)
1894 struct xfs_da_blkinfo *drop_info;
1895 struct xfs_da_blkinfo *save_info;
1896 struct xfs_da_blkinfo *tmp_info;
1897 struct xfs_da_args *args;
1902 * Set up environment.
1905 ASSERT(args != NULL);
1906 save_info = save_blk->bp->b_addr;
1907 drop_info = drop_blk->bp->b_addr;
1908 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1909 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1910 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1911 ASSERT(save_blk->magic == drop_blk->magic);
1912 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1913 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1914 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1915 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1918 * Unlink the leaf block from the doubly linked chain of leaves.
1920 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1921 trace_xfs_da_unlink_back(args);
1922 save_info->back = drop_info->back;
1923 if (drop_info->back) {
1924 error = xfs_da3_node_read(args->trans, args->dp,
1925 be32_to_cpu(drop_info->back),
1926 &bp, args->whichfork);
1930 tmp_info = bp->b_addr;
1931 ASSERT(tmp_info->magic == save_info->magic);
1932 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1933 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1934 xfs_trans_log_buf(args->trans, bp, 0,
1935 sizeof(*tmp_info) - 1);
1938 trace_xfs_da_unlink_forward(args);
1939 save_info->forw = drop_info->forw;
1940 if (drop_info->forw) {
1941 error = xfs_da3_node_read(args->trans, args->dp,
1942 be32_to_cpu(drop_info->forw),
1943 &bp, args->whichfork);
1947 tmp_info = bp->b_addr;
1948 ASSERT(tmp_info->magic == save_info->magic);
1949 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1950 tmp_info->back = cpu_to_be32(save_blk->blkno);
1951 xfs_trans_log_buf(args->trans, bp, 0,
1952 sizeof(*tmp_info) - 1);
1956 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1961 * Move a path "forward" or "!forward" one block at the current level.
1963 * This routine will adjust a "path" to point to the next block
1964 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1965 * Btree, including updating pointers to the intermediate nodes between
1966 * the new bottom and the root.
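 *
 * For example (a sketch): shifting "forward" from the rightmost entry of a
 * leaf walks up until some node has an entry to the right of the current
 * index, then walks back down the leftmost edge of that subtree to land on
 * the sibling leaf.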
1970 struct xfs_da_state *state,
1971 struct xfs_da_state_path *path,
1976 struct xfs_da_state_blk *blk;
1977 struct xfs_da_blkinfo *info;
1978 struct xfs_da_args *args;
1979 struct xfs_da_node_entry *btree;
1980 struct xfs_da3_icnode_hdr nodehdr;
1982 xfs_dablk_t blkno = 0;
1985 struct xfs_inode *dp = state->args->dp;
1987 trace_xfs_da_path_shift(state->args);
1990 * Roll up the Btree looking for the first block where our
1991 * current index is not at the edge of the block. Note that
1992 * we skip the bottom layer because we want the sibling block.
1995 ASSERT(args != NULL);
1996 ASSERT(path != NULL);
1997 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1998 level = (path->active-1) - 1; /* skip bottom layer in path */
1999 for (; level >= 0; level--) {
2000 blk = &path->blk[level];
2001 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2004 if (forward && (blk->index < nodehdr.count - 1)) {
2006 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2008 } else if (!forward && (blk->index > 0)) {
2010 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2015 *result = -ENOENT; /* we're out of our tree */
2016 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
2021 * Roll down the edge of the subtree until we reach the
2022 * same depth we were at originally.
2024 for (blk++, level++; level < path->active; blk++, level++) {
2026 * Read the next child block into a local buffer.
2028 error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
2034 * Release the old block (if it's dirty, the trans doesn't
2035 * actually let go) and swap the local buffer into the path
2036 * structure. This ensures failure of the above read doesn't set
2037 * a NULL buffer in an active slot in the path.
2040 xfs_trans_brelse(args->trans, blk->bp);
2044 info = blk->bp->b_addr;
2045 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
2046 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
2047 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2048 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
2049 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
2050 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
2054 * Note: we flatten the magic number to a single type so we
2055 * don't have to compare against crc/non-crc types elsewhere.
2057 switch (be16_to_cpu(info->magic)) {
2058 case XFS_DA_NODE_MAGIC:
2059 case XFS_DA3_NODE_MAGIC:
2060 blk->magic = XFS_DA_NODE_MAGIC;
2061 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2063 btree = nodehdr.btree;
2064 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2068 blk->index = nodehdr.count - 1;
2069 blkno = be32_to_cpu(btree[blk->index].before);
2071 case XFS_ATTR_LEAF_MAGIC:
2072 case XFS_ATTR3_LEAF_MAGIC:
2073 blk->magic = XFS_ATTR_LEAF_MAGIC;
2074 ASSERT(level == path->active-1);
2076 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
2078 case XFS_DIR2_LEAFN_MAGIC:
2079 case XFS_DIR3_LEAFN_MAGIC:
2080 blk->magic = XFS_DIR2_LEAFN_MAGIC;
2081 ASSERT(level == path->active-1);
2083 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2096 /*========================================================================
2098 *========================================================================*/
2101 * Implement a simple hash on a character string.
2102 * Rotate the hash value by 7 bits, then XOR each character in.
2103 * This is implemented with some source-level loop unrolling.
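 *
 * Worked example (an illustration, not from the original): for the two-byte
 * name "ab" the switch below returns ('a' << 7) ^ ('b' << 0) ^ rol32(0, 14),
 * i.e. 0x3080 ^ 0x62 = 0x30e2.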
2106 xfs_da_hashname(const uint8_t *name, int namelen)
2111 * Do four characters at a time as long as we can.
2113 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2114 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2115 (name[3] << 0) ^ rol32(hash, 7 * 4);
2118 * Now do the rest of the characters.
2122 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2125 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2127 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2128 default: /* case 0: */
2135 struct xfs_da_args *args,
2136 const unsigned char *name,
2139 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2140 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2144 xfs_da_grow_inode_int(
2145 struct xfs_da_args *args,
2149 struct xfs_trans *tp = args->trans;
2150 struct xfs_inode *dp = args->dp;
2151 int w = args->whichfork;
2152 xfs_rfsblock_t nblks = dp->i_nblocks;
2153 struct xfs_bmbt_irec map, *mapp;
2154 int nmap, error, got, i, mapi;
2157 * Find a spot in the file space to put the new block.
2159 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2164 * Try mapping it in one filesystem block.
2167 error = xfs_bmapi_write(tp, dp, *bno, count,
2168 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2169 args->total, &map, &nmap);
2177 } else if (nmap == 0 && count > 1) {
2182 * If we didn't get it and the block might work if fragmented,
2183 * try without the CONTIG flag. Loop until we get it all.
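 *
 * For instance (a sketch): a multi-fsb directory block in a fragmented data
 * fork might come back as two or three shorter extents; the loop below
 * keeps calling xfs_bmapi_write() from the end of the last mapping until
 * the whole range [*bno, *bno + count) has been allocated.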
2185 mapp = kmem_alloc(sizeof(*mapp) * count, 0);
2186 for (b = *bno, mapi = 0; b < *bno + count; ) {
2187 c = (int)(*bno + count - b);
2188 nmap = min(XFS_BMAP_MAX_NMAP, c);
2189 error = xfs_bmapi_write(tp, dp, b, c,
2190 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2191 args->total, &mapp[mapi], &nmap);
2197 b = mapp[mapi - 1].br_startoff +
2198 mapp[mapi - 1].br_blockcount;
2206 * Count the blocks we got, make sure it matches the total.
2208 for (i = 0, got = 0; i < mapi; i++)
2209 got += mapp[i].br_blockcount;
2210 if (got != count || mapp[0].br_startoff != *bno ||
2211 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2217 /* account for newly allocated blocks in reserved blocks total */
2218 args->total -= dp->i_nblocks - nblks;
2227 * Add a block to the btree ahead of the file.
2228 * Return the new block number to the caller.
2232 struct xfs_da_args *args,
2233 xfs_dablk_t *new_blkno)
2238 trace_xfs_da_grow_inode(args);
2240 bno = args->geo->leafblk;
2241 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2243 *new_blkno = (xfs_dablk_t)bno;
2248 * Ick. We need to always be able to remove a btree block, even
2249 * if there's no space reservation because the filesystem is full.
2250 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2251 * It swaps the target block with the last block in the file. The
2252 * last block in the file can always be removed, since removing it
2253 * cannot cause a bmap btree split.
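 *
 * Sketch of the approach (an illustration): copy the last da block's
 * contents over the block being freed, repoint its siblings and its parent
 * entry at the old block number, then hand the now-unused last block back
 * to the caller to unmap instead.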
2256 xfs_da3_swap_lastblock(
2257 struct xfs_da_args *args,
2258 xfs_dablk_t *dead_blknop,
2259 struct xfs_buf **dead_bufp)
2261 struct xfs_da_blkinfo *dead_info;
2262 struct xfs_da_blkinfo *sib_info;
2263 struct xfs_da_intnode *par_node;
2264 struct xfs_da_intnode *dead_node;
2265 struct xfs_dir2_leaf *dead_leaf2;
2266 struct xfs_da_node_entry *btree;
2267 struct xfs_da3_icnode_hdr par_hdr;
2268 struct xfs_inode *dp;
2269 struct xfs_trans *tp;
2270 struct xfs_mount *mp;
2271 struct xfs_buf *dead_buf;
2272 struct xfs_buf *last_buf;
2273 struct xfs_buf *sib_buf;
2274 struct xfs_buf *par_buf;
2275 xfs_dahash_t dead_hash;
2276 xfs_fileoff_t lastoff;
2277 xfs_dablk_t dead_blkno;
2278 xfs_dablk_t last_blkno;
2279 xfs_dablk_t sib_blkno;
2280 xfs_dablk_t par_blkno;
2287 trace_xfs_da_swap_lastblock(args);
2289 dead_buf = *dead_bufp;
2290 dead_blkno = *dead_blknop;
2293 w = args->whichfork;
2294 ASSERT(w == XFS_DATA_FORK);
2296 lastoff = args->geo->freeblk;
2297 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2300 if (XFS_IS_CORRUPT(mp, lastoff == 0))
2301 return -EFSCORRUPTED;
2303 * Read the last block in the btree space.
2305 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2306 error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
2310 * Copy the last block into the dead buffer and log it.
2312 xfs_da_buf_copy(dead_buf, last_buf, args->geo->blksize);
2313 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2314 dead_info = dead_buf->b_addr;
2317 * Get values from the moved block.
2319 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2320 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2321 struct xfs_dir3_icleaf_hdr leafhdr;
2322 struct xfs_dir2_leaf_entry *ents;
2324 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2325 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
2327 ents = leafhdr.ents;
2329 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2331 struct xfs_da3_icnode_hdr deadhdr;
2333 dead_node = (xfs_da_intnode_t *)dead_info;
2334 xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
2335 btree = deadhdr.btree;
2336 dead_level = deadhdr.level;
2337 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2339 sib_buf = par_buf = NULL;
2341 * If the moved block has a left sibling, fix up the pointers.
2343 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2344 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
2347 sib_info = sib_buf->b_addr;
2348 if (XFS_IS_CORRUPT(mp,
2349 be32_to_cpu(sib_info->forw) != last_blkno ||
2350 sib_info->magic != dead_info->magic)) {
2351 error = -EFSCORRUPTED;
2354 sib_info->forw = cpu_to_be32(dead_blkno);
2355 xfs_trans_log_buf(tp, sib_buf,
2356 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2357 sizeof(sib_info->forw)));
2361 * If the moved block has a right sibling, fix up the pointers.
2363 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2364 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
2367 sib_info = sib_buf->b_addr;
2368 if (XFS_IS_CORRUPT(mp,
2369 be32_to_cpu(sib_info->back) != last_blkno ||
2370 sib_info->magic != dead_info->magic)) {
2371 error = -EFSCORRUPTED;
2374 sib_info->back = cpu_to_be32(dead_blkno);
2375 xfs_trans_log_buf(tp, sib_buf,
2376 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2377 sizeof(sib_info->back)));
2380 par_blkno = args->geo->leafblk;
2383 * Walk down the tree looking for the parent of the moved block.
2386 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
2389 par_node = par_buf->b_addr;
2390 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2391 if (XFS_IS_CORRUPT(mp,
2392 level >= 0 && level != par_hdr.level + 1)) {
2393 error = -EFSCORRUPTED;
2396 level = par_hdr.level;
2397 btree = par_hdr.btree;
2399 entno < par_hdr.count &&
2400 be32_to_cpu(btree[entno].hashval) < dead_hash;
2403 if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
2404 error = -EFSCORRUPTED;
2407 par_blkno = be32_to_cpu(btree[entno].before);
2408 if (level == dead_level + 1)
2410 xfs_trans_brelse(tp, par_buf);
2414 * We're in the right parent block.
2415 * Look for the right entry.
2419 entno < par_hdr.count &&
2420 be32_to_cpu(btree[entno].before) != last_blkno;
2423 if (entno < par_hdr.count)
2425 par_blkno = par_hdr.forw;
2426 xfs_trans_brelse(tp, par_buf);
2428 if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
2429 error = -EFSCORRUPTED;
2432 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
2435 par_node = par_buf->b_addr;
2436 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2437 if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
2438 error = -EFSCORRUPTED;
2441 btree = par_hdr.btree;
2445 * Update the parent entry pointing to the moved block.
2447 btree[entno].before = cpu_to_be32(dead_blkno);
2448 xfs_trans_log_buf(tp, par_buf,
2449 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2450 sizeof(btree[entno].before)));
2451 *dead_blknop = last_blkno;
2452 *dead_bufp = last_buf;
2456 xfs_trans_brelse(tp, par_buf);
2458 xfs_trans_brelse(tp, sib_buf);
2459 xfs_trans_brelse(tp, last_buf);
2464 * Remove a btree block from a directory or attribute.
2467 xfs_da_shrink_inode(
2468 struct xfs_da_args *args,
2469 xfs_dablk_t dead_blkno,
2470 struct xfs_buf *dead_buf)
2472 struct xfs_inode *dp;
2473 int done, error, w, count;
2474 struct xfs_trans *tp;
2476 trace_xfs_da_shrink_inode(args);
2479 w = args->whichfork;
2481 count = args->geo->fsbcount;
2484 * Remove extents. If we get ENOSPC for a dir we have to move
2485 * the last block to the place we want to kill.
2487 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2488 xfs_bmapi_aflag(w), 0, &done);
2489 if (error == -ENOSPC) {
2490 if (w != XFS_DATA_FORK)
2492 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2500 xfs_trans_binval(tp, dead_buf);
2506 struct xfs_inode *dp,
2510 struct xfs_buf_map **mapp,
2513 struct xfs_mount *mp = dp->i_mount;
2514 int nfsb = xfs_dabuf_nfsb(mp, whichfork);
2515 struct xfs_bmbt_irec irec, *irecs = &irec;
2516 struct xfs_buf_map *map = *mapp;
2517 xfs_fileoff_t off = bno;
2518 int error = 0, nirecs, i;
2521 irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);
2524 error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
2525 xfs_bmapi_aflag(whichfork));
2527 goto out_free_irecs;
2530 * Use the caller provided map for the single map case, else allocate a
2531 * larger one that needs to be freed by the caller.
2534 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
2537 goto out_free_irecs;
2542 for (i = 0; i < nirecs; i++) {
2543 if (irecs[i].br_startblock == HOLESTARTBLOCK ||
2544 irecs[i].br_startblock == DELAYSTARTBLOCK)
2545 goto invalid_mapping;
2546 if (off != irecs[i].br_startoff)
2547 goto invalid_mapping;
2549 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2550 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2551 off += irecs[i].br_blockcount;
2554 if (off != bno + nfsb)
2555 goto invalid_mapping;
2564 /* Caller ok with no mapping. */
2565 if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
2566 error = -EFSCORRUPTED;
2567 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2568 xfs_alert(mp, "%s: bno %u inode %llu",
2569 __func__, bno, dp->i_ino);
2571 for (i = 0; i < nirecs; i++) {
2573 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2574 i, irecs[i].br_startoff,
2575 irecs[i].br_startblock,
2576 irecs[i].br_blockcount,
2583 goto out_free_irecs;
2587 * Get a buffer for the dir/attr block.
2591 struct xfs_trans *tp,
2592 struct xfs_inode *dp,
2594 struct xfs_buf **bpp,
2597 struct xfs_mount *mp = dp->i_mount;
2599 struct xfs_buf_map map, *mapp = ↦
2604 error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
2605 if (error || nmap == 0)
2608 error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
2622 * Get a buffer for the dir/attr block, fill in the contents.
2626 struct xfs_trans *tp,
2627 struct xfs_inode *dp,
2630 struct xfs_buf **bpp,
2632 const struct xfs_buf_ops *ops)
2634 struct xfs_mount *mp = dp->i_mount;
2636 struct xfs_buf_map map, *mapp = ↦
2641 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
2645 error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
2650 if (whichfork == XFS_ATTR_FORK)
2651 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2653 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2663 * Readahead the dir/attr block.
2667 struct xfs_inode *dp,
2671 const struct xfs_buf_ops *ops)
2673 struct xfs_buf_map map;
2674 struct xfs_buf_map *mapp;
2680 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
2684 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);