1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2017 Christoph Hellwig.
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trace.h"
17 * In-core extent record layout:
19 * +-------+----------------------------+
20 * | 00:53 | all 54 bits of startoff |
21 * | 54:63 | low 10 bits of startblock |
22 * +-------+----------------------------+
23 * | 00:20 | all 21 bits of length |
24 * | 21 | unwritten extent bit |
25 * | 22:63 | high 42 bits of startblock |
26 * +-------+----------------------------+
28 #define XFS_IEXT_STARTOFF_MASK xfs_mask64lo(BMBT_STARTOFF_BITLEN)
29 #define XFS_IEXT_LENGTH_MASK xfs_mask64lo(BMBT_BLOCKCOUNT_BITLEN)
30 #define XFS_IEXT_STARTBLOCK_MASK xfs_mask64lo(BMBT_STARTBLOCK_BITLEN)
38 * Given that the length can't be a zero, only an empty hi value indicates an
41 static bool xfs_iext_rec_is_empty(struct xfs_iext_rec *rec)
46 static inline void xfs_iext_rec_clear(struct xfs_iext_rec *rec)
54 struct xfs_iext_rec *rec,
55 struct xfs_bmbt_irec *irec)
57 ASSERT((irec->br_startoff & ~XFS_IEXT_STARTOFF_MASK) == 0);
58 ASSERT((irec->br_blockcount & ~XFS_IEXT_LENGTH_MASK) == 0);
59 ASSERT((irec->br_startblock & ~XFS_IEXT_STARTBLOCK_MASK) == 0);
61 rec->lo = irec->br_startoff & XFS_IEXT_STARTOFF_MASK;
62 rec->hi = irec->br_blockcount & XFS_IEXT_LENGTH_MASK;
64 rec->lo |= (irec->br_startblock << 54);
65 rec->hi |= ((irec->br_startblock & ~xfs_mask64lo(10)) << (22 - 10));
67 if (irec->br_state == XFS_EXT_UNWRITTEN)
73 struct xfs_bmbt_irec *irec,
74 struct xfs_iext_rec *rec)
76 irec->br_startoff = rec->lo & XFS_IEXT_STARTOFF_MASK;
77 irec->br_blockcount = rec->hi & XFS_IEXT_LENGTH_MASK;
79 irec->br_startblock = rec->lo >> 54;
80 irec->br_startblock |= (rec->hi & xfs_mask64hi(42)) >> (22 - 10);
82 if (rec->hi & (1 << 21))
83 irec->br_state = XFS_EXT_UNWRITTEN;
85 irec->br_state = XFS_EXT_NORM;
90 KEYS_PER_NODE = NODE_SIZE / (sizeof(uint64_t) + sizeof(void *)),
91 RECS_PER_LEAF = (NODE_SIZE - (2 * sizeof(struct xfs_iext_leaf *))) /
92 sizeof(struct xfs_iext_rec),
96 * In-core extent btree block layout:
98 * There are two types of blocks in the btree: leaf and inner (non-leaf) blocks.
100 * The leaf blocks are made up by %RECS_PER_LEAF extent records, which each
101 * contain the startoffset, blockcount, startblock and unwritten extent flag.
102 * See above for the exact format, followed by pointers to the previous and next
103 * leaf blocks (if there are any).
105 * The inner (non-leaf) blocks first contain KEYS_PER_NODE lookup keys, followed
106 * by an equal number of pointers to the btree blocks at the next lower level.
108 * +-------+-------+-------+-------+-------+----------+----------+
109 * Leaf: | rec 1 | rec 2 | rec 3 | rec 4 | rec N | prev-ptr | next-ptr |
110 * +-------+-------+-------+-------+-------+----------+----------+
112 * +-------+-------+-------+-------+-------+-------+------+-------+
113 * Inner: | key 1 | key 2 | key 3 | key N | ptr 1 | ptr 2 | ptr3 | ptr N |
114 * +-------+-------+-------+-------+-------+-------+------+-------+
116 struct xfs_iext_node {
117 uint64_t keys[KEYS_PER_NODE];
118 #define XFS_IEXT_KEY_INVALID (1ULL << 63)
119 void *ptrs[KEYS_PER_NODE];
122 struct xfs_iext_leaf {
123 struct xfs_iext_rec recs[RECS_PER_LEAF];
124 struct xfs_iext_leaf *prev;
125 struct xfs_iext_leaf *next;
128 inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
130 return ifp->if_bytes / sizeof(struct xfs_iext_rec);
133 static inline int xfs_iext_max_recs(struct xfs_ifork *ifp)
135 if (ifp->if_height == 1)
136 return xfs_iext_count(ifp);
137 return RECS_PER_LEAF;
140 static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
142 return &cur->leaf->recs[cur->pos];
145 static inline bool xfs_iext_valid(struct xfs_ifork *ifp,
146 struct xfs_iext_cursor *cur)
150 if (cur->pos < 0 || cur->pos >= xfs_iext_max_recs(ifp))
152 if (xfs_iext_rec_is_empty(cur_rec(cur)))
158 xfs_iext_find_first_leaf(
159 struct xfs_ifork *ifp)
161 struct xfs_iext_node *node = ifp->if_data;
167 for (height = ifp->if_height; height > 1; height--) {
168 node = node->ptrs[0];
176 xfs_iext_find_last_leaf(
177 struct xfs_ifork *ifp)
179 struct xfs_iext_node *node = ifp->if_data;
185 for (height = ifp->if_height; height > 1; height--) {
186 for (i = 1; i < KEYS_PER_NODE; i++)
189 node = node->ptrs[i - 1];
198 struct xfs_ifork *ifp,
199 struct xfs_iext_cursor *cur)
202 cur->leaf = xfs_iext_find_first_leaf(ifp);
207 struct xfs_ifork *ifp,
208 struct xfs_iext_cursor *cur)
212 cur->leaf = xfs_iext_find_last_leaf(ifp);
218 for (i = 1; i < xfs_iext_max_recs(ifp); i++) {
219 if (xfs_iext_rec_is_empty(&cur->leaf->recs[i]))
227 struct xfs_ifork *ifp,
228 struct xfs_iext_cursor *cur)
231 ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
232 xfs_iext_first(ifp, cur);
236 ASSERT(cur->pos >= 0);
237 ASSERT(cur->pos < xfs_iext_max_recs(ifp));
240 if (ifp->if_height > 1 && !xfs_iext_valid(ifp, cur) &&
242 cur->leaf = cur->leaf->next;
249 struct xfs_ifork *ifp,
250 struct xfs_iext_cursor *cur)
253 ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
254 xfs_iext_last(ifp, cur);
258 ASSERT(cur->pos >= 0);
259 ASSERT(cur->pos <= RECS_PER_LEAF);
264 if (xfs_iext_valid(ifp, cur))
266 } while (cur->pos > 0);
268 if (ifp->if_height > 1 && cur->leaf->prev) {
269 cur->leaf = cur->leaf->prev;
270 cur->pos = RECS_PER_LEAF;
277 struct xfs_iext_node *node,
279 xfs_fileoff_t offset)
281 if (node->keys[n] > offset)
283 if (node->keys[n] < offset)
290 struct xfs_iext_rec *rec,
291 xfs_fileoff_t offset)
293 uint64_t rec_offset = rec->lo & XFS_IEXT_STARTOFF_MASK;
294 uint32_t rec_len = rec->hi & XFS_IEXT_LENGTH_MASK;
296 if (rec_offset > offset)
298 if (rec_offset + rec_len <= offset)
305 struct xfs_ifork *ifp,
306 xfs_fileoff_t offset,
309 struct xfs_iext_node *node = ifp->if_data;
315 for (height = ifp->if_height; height > level; height--) {
316 for (i = 1; i < KEYS_PER_NODE; i++)
317 if (xfs_iext_key_cmp(node, i, offset) > 0)
320 node = node->ptrs[i - 1];
330 struct xfs_iext_node *node,
331 xfs_fileoff_t offset)
335 for (i = 1; i < KEYS_PER_NODE; i++) {
336 if (xfs_iext_key_cmp(node, i, offset) > 0)
344 xfs_iext_node_insert_pos(
345 struct xfs_iext_node *node,
346 xfs_fileoff_t offset)
350 for (i = 0; i < KEYS_PER_NODE; i++) {
351 if (xfs_iext_key_cmp(node, i, offset) > 0)
355 return KEYS_PER_NODE;
359 xfs_iext_node_nr_entries(
360 struct xfs_iext_node *node,
365 for (i = start; i < KEYS_PER_NODE; i++) {
366 if (node->keys[i] == XFS_IEXT_KEY_INVALID)
374 xfs_iext_leaf_nr_entries(
375 struct xfs_ifork *ifp,
376 struct xfs_iext_leaf *leaf,
381 for (i = start; i < xfs_iext_max_recs(ifp); i++) {
382 if (xfs_iext_rec_is_empty(&leaf->recs[i]))
389 static inline uint64_t
391 struct xfs_iext_leaf *leaf,
394 return leaf->recs[n].lo & XFS_IEXT_STARTOFF_MASK;
399 struct xfs_ifork *ifp)
401 struct xfs_iext_node *node = kmem_zalloc(NODE_SIZE, KM_NOFS);
404 if (ifp->if_height == 1) {
405 struct xfs_iext_leaf *prev = ifp->if_data;
407 node->keys[0] = xfs_iext_leaf_key(prev, 0);
408 node->ptrs[0] = prev;
410 struct xfs_iext_node *prev = ifp->if_data;
412 ASSERT(ifp->if_height > 1);
414 node->keys[0] = prev->keys[0];
415 node->ptrs[0] = prev;
418 for (i = 1; i < KEYS_PER_NODE; i++)
419 node->keys[i] = XFS_IEXT_KEY_INVALID;
426 xfs_iext_update_node(
427 struct xfs_ifork *ifp,
428 xfs_fileoff_t old_offset,
429 xfs_fileoff_t new_offset,
433 struct xfs_iext_node *node = ifp->if_data;
436 for (height = ifp->if_height; height > level; height--) {
437 for (i = 0; i < KEYS_PER_NODE; i++) {
438 if (i > 0 && xfs_iext_key_cmp(node, i, old_offset) > 0)
440 if (node->keys[i] == old_offset)
441 node->keys[i] = new_offset;
443 node = node->ptrs[i - 1];
450 static struct xfs_iext_node *
452 struct xfs_iext_node **nodep,
456 struct xfs_iext_node *node = *nodep;
457 struct xfs_iext_node *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
458 const int nr_move = KEYS_PER_NODE / 2;
459 int nr_keep = nr_move + (KEYS_PER_NODE & 1);
462 /* for sequential append operations just spill over into the new node */
463 if (*pos == KEYS_PER_NODE) {
471 for (i = 0; i < nr_move; i++) {
472 new->keys[i] = node->keys[nr_keep + i];
473 new->ptrs[i] = node->ptrs[nr_keep + i];
475 node->keys[nr_keep + i] = XFS_IEXT_KEY_INVALID;
476 node->ptrs[nr_keep + i] = NULL;
479 if (*pos >= nr_keep) {
482 *nr_entries = nr_move;
484 *nr_entries = nr_keep;
487 for (; i < KEYS_PER_NODE; i++)
488 new->keys[i] = XFS_IEXT_KEY_INVALID;
493 xfs_iext_insert_node(
494 struct xfs_ifork *ifp,
499 struct xfs_iext_node *node, *new;
500 int i, pos, nr_entries;
503 if (ifp->if_height < level)
507 node = xfs_iext_find_level(ifp, offset, level);
508 pos = xfs_iext_node_insert_pos(node, offset);
509 nr_entries = xfs_iext_node_nr_entries(node, pos);
511 ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);
512 ASSERT(nr_entries <= KEYS_PER_NODE);
514 if (nr_entries == KEYS_PER_NODE)
515 new = xfs_iext_split_node(&node, &pos, &nr_entries);
518 * Update the pointers in higher levels if the first entry changes
519 * in an existing node.
521 if (node != new && pos == 0 && nr_entries > 0)
522 xfs_iext_update_node(ifp, node->keys[0], offset, level, node);
524 for (i = nr_entries; i > pos; i--) {
525 node->keys[i] = node->keys[i - 1];
526 node->ptrs[i] = node->ptrs[i - 1];
528 node->keys[pos] = offset;
529 node->ptrs[pos] = ptr;
532 offset = new->keys[0];
539 static struct xfs_iext_leaf *
541 struct xfs_iext_cursor *cur,
544 struct xfs_iext_leaf *leaf = cur->leaf;
545 struct xfs_iext_leaf *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
546 const int nr_move = RECS_PER_LEAF / 2;
547 int nr_keep = nr_move + (RECS_PER_LEAF & 1);
550 /* for sequential append operations just spill over into the new node */
551 if (cur->pos == RECS_PER_LEAF) {
558 for (i = 0; i < nr_move; i++) {
559 new->recs[i] = leaf->recs[nr_keep + i];
560 xfs_iext_rec_clear(&leaf->recs[nr_keep + i]);
563 if (cur->pos >= nr_keep) {
566 *nr_entries = nr_move;
568 *nr_entries = nr_keep;
572 leaf->next->prev = new;
573 new->next = leaf->next;
581 struct xfs_ifork *ifp,
582 struct xfs_iext_cursor *cur)
584 ASSERT(ifp->if_bytes == 0);
586 ifp->if_data = kmem_zalloc(sizeof(struct xfs_iext_rec), KM_NOFS);
589 /* now that we have a node step into it */
590 cur->leaf = ifp->if_data;
595 xfs_iext_realloc_root(
596 struct xfs_ifork *ifp,
597 struct xfs_iext_cursor *cur)
599 int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
602 /* account for the prev/next pointers */
603 if (new_size / sizeof(struct xfs_iext_rec) == RECS_PER_LEAF)
604 new_size = NODE_SIZE;
606 new = krealloc(ifp->if_data, new_size, GFP_NOFS | __GFP_NOFAIL);
607 memset(new + ifp->if_bytes, 0, new_size - ifp->if_bytes);
613 * Increment the sequence counter on extent tree changes. If we are on a COW
614 * fork, this allows the writeback code to skip looking for a COW extent if the
615 * COW fork hasn't changed. We use WRITE_ONCE here to ensure the update to the
616 * sequence counter is seen before the modifications to the extent tree itself
619 static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp)
621 WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
626 struct xfs_ifork *ifp,
627 struct xfs_iext_cursor *cur,
628 struct xfs_bmbt_irec *irec)
630 xfs_fileoff_t offset = irec->br_startoff;
631 struct xfs_iext_leaf *new = NULL;
634 xfs_iext_inc_seq(ifp);
636 if (ifp->if_height == 0)
637 xfs_iext_alloc_root(ifp, cur);
638 else if (ifp->if_height == 1)
639 xfs_iext_realloc_root(ifp, cur);
641 nr_entries = xfs_iext_leaf_nr_entries(ifp, cur->leaf, cur->pos);
642 ASSERT(nr_entries <= RECS_PER_LEAF);
643 ASSERT(cur->pos >= nr_entries ||
644 xfs_iext_rec_cmp(cur_rec(cur), irec->br_startoff) != 0);
646 if (nr_entries == RECS_PER_LEAF)
647 new = xfs_iext_split_leaf(cur, &nr_entries);
650 * Update the pointers in higher levels if the first entry changes
651 * in an existing node.
653 if (cur->leaf != new && cur->pos == 0 && nr_entries > 0) {
654 xfs_iext_update_node(ifp, xfs_iext_leaf_key(cur->leaf, 0),
655 offset, 1, cur->leaf);
658 for (i = nr_entries; i > cur->pos; i--)
659 cur->leaf->recs[i] = cur->leaf->recs[i - 1];
660 xfs_iext_set(cur_rec(cur), irec);
661 ifp->if_bytes += sizeof(struct xfs_iext_rec);
664 xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
669 struct xfs_inode *ip,
670 struct xfs_iext_cursor *cur,
671 struct xfs_bmbt_irec *irec,
674 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
676 xfs_iext_insert_raw(ifp, cur, irec);
677 trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
680 static struct xfs_iext_node *
681 xfs_iext_rebalance_node(
682 struct xfs_iext_node *parent,
684 struct xfs_iext_node *node,
688 * If the neighbouring nodes are completely full, or have different
689 * parents, we might never be able to merge our node, and will only
690 * delete it once the number of entries hits zero.
696 struct xfs_iext_node *prev = parent->ptrs[*pos - 1];
697 int nr_prev = xfs_iext_node_nr_entries(prev, 0), i;
699 if (nr_prev + nr_entries <= KEYS_PER_NODE) {
700 for (i = 0; i < nr_entries; i++) {
701 prev->keys[nr_prev + i] = node->keys[i];
702 prev->ptrs[nr_prev + i] = node->ptrs[i];
708 if (*pos + 1 < xfs_iext_node_nr_entries(parent, *pos)) {
709 struct xfs_iext_node *next = parent->ptrs[*pos + 1];
710 int nr_next = xfs_iext_node_nr_entries(next, 0), i;
712 if (nr_entries + nr_next <= KEYS_PER_NODE) {
714 * Merge the next node into this node so that we don't
715 * have to do an additional update of the keys in the
718 for (i = 0; i < nr_next; i++) {
719 node->keys[nr_entries + i] = next->keys[i];
720 node->ptrs[nr_entries + i] = next->ptrs[i];
732 xfs_iext_remove_node(
733 struct xfs_ifork *ifp,
734 xfs_fileoff_t offset,
737 struct xfs_iext_node *node, *parent;
738 int level = 2, pos, nr_entries, i;
740 ASSERT(level <= ifp->if_height);
741 node = xfs_iext_find_level(ifp, offset, level);
742 pos = xfs_iext_node_pos(node, offset);
744 ASSERT(node->ptrs[pos]);
745 ASSERT(node->ptrs[pos] == victim);
748 nr_entries = xfs_iext_node_nr_entries(node, pos) - 1;
749 offset = node->keys[0];
750 for (i = pos; i < nr_entries; i++) {
751 node->keys[i] = node->keys[i + 1];
752 node->ptrs[i] = node->ptrs[i + 1];
754 node->keys[nr_entries] = XFS_IEXT_KEY_INVALID;
755 node->ptrs[nr_entries] = NULL;
757 if (pos == 0 && nr_entries > 0) {
758 xfs_iext_update_node(ifp, offset, node->keys[0], level, node);
759 offset = node->keys[0];
762 if (nr_entries >= KEYS_PER_NODE / 2)
765 if (level < ifp->if_height) {
767 * If we aren't at the root yet try to find a neighbour node to
768 * merge with (or delete the node if it is empty), and then
769 * recurse up to the next level.
772 parent = xfs_iext_find_level(ifp, offset, level);
773 pos = xfs_iext_node_pos(parent, offset);
775 ASSERT(pos != KEYS_PER_NODE);
776 ASSERT(parent->ptrs[pos] == node);
778 node = xfs_iext_rebalance_node(parent, &pos, node, nr_entries);
784 } else if (nr_entries == 1) {
786 * If we are at the root and only one entry is left we can just
787 * free this node and update the root pointer.
789 ASSERT(node == ifp->if_data);
790 ifp->if_data = node->ptrs[0];
797 xfs_iext_rebalance_leaf(
798 struct xfs_ifork *ifp,
799 struct xfs_iext_cursor *cur,
800 struct xfs_iext_leaf *leaf,
801 xfs_fileoff_t offset,
805 * If the neighbouring nodes are completely full we might never be able
806 * to merge our node, and will only delete it once the number of
813 int nr_prev = xfs_iext_leaf_nr_entries(ifp, leaf->prev, 0), i;
815 if (nr_prev + nr_entries <= RECS_PER_LEAF) {
816 for (i = 0; i < nr_entries; i++)
817 leaf->prev->recs[nr_prev + i] = leaf->recs[i];
819 if (cur->leaf == leaf) {
820 cur->leaf = leaf->prev;
828 int nr_next = xfs_iext_leaf_nr_entries(ifp, leaf->next, 0), i;
830 if (nr_entries + nr_next <= RECS_PER_LEAF) {
832 * Merge the next node into this node so that we don't
833 * have to do an additional update of the keys in the
836 for (i = 0; i < nr_next; i++) {
837 leaf->recs[nr_entries + i] =
841 if (cur->leaf == leaf->next) {
843 cur->pos += nr_entries;
846 offset = xfs_iext_leaf_key(leaf->next, 0);
855 leaf->prev->next = leaf->next;
857 leaf->next->prev = leaf->prev;
858 xfs_iext_remove_node(ifp, offset, leaf);
862 xfs_iext_free_last_leaf(
863 struct xfs_ifork *ifp)
866 kmem_free(ifp->if_data);
872 struct xfs_inode *ip,
873 struct xfs_iext_cursor *cur,
876 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
877 struct xfs_iext_leaf *leaf = cur->leaf;
878 xfs_fileoff_t offset = xfs_iext_leaf_key(leaf, 0);
881 trace_xfs_iext_remove(ip, cur, state, _RET_IP_);
883 ASSERT(ifp->if_height > 0);
884 ASSERT(ifp->if_data != NULL);
885 ASSERT(xfs_iext_valid(ifp, cur));
887 xfs_iext_inc_seq(ifp);
889 nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
890 for (i = cur->pos; i < nr_entries; i++)
891 leaf->recs[i] = leaf->recs[i + 1];
892 xfs_iext_rec_clear(&leaf->recs[nr_entries]);
893 ifp->if_bytes -= sizeof(struct xfs_iext_rec);
895 if (cur->pos == 0 && nr_entries > 0) {
896 xfs_iext_update_node(ifp, offset, xfs_iext_leaf_key(leaf, 0), 1,
898 offset = xfs_iext_leaf_key(leaf, 0);
899 } else if (cur->pos == nr_entries) {
900 if (ifp->if_height > 1 && leaf->next)
901 cur->leaf = leaf->next;
907 if (nr_entries >= RECS_PER_LEAF / 2)
910 if (ifp->if_height > 1)
911 xfs_iext_rebalance_leaf(ifp, cur, leaf, offset, nr_entries);
912 else if (nr_entries == 0)
913 xfs_iext_free_last_leaf(ifp);
917 * Lookup the extent covering bno.
919 * If there is an extent covering bno return the extent index, and store the
920 * expanded extent structure in *gotp, and the extent cursor in *cur.
921 * If there is no extent covering bno, but there is an extent after it (e.g.
922 * it lies in a hole) return that extent in *gotp and its cursor in *cur
924 * If bno is beyond the last extent return false, and return an invalid
928 xfs_iext_lookup_extent(
929 struct xfs_inode *ip,
930 struct xfs_ifork *ifp,
931 xfs_fileoff_t offset,
932 struct xfs_iext_cursor *cur,
933 struct xfs_bmbt_irec *gotp)
935 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
937 cur->leaf = xfs_iext_find_level(ifp, offset, 1);
943 for (cur->pos = 0; cur->pos < xfs_iext_max_recs(ifp); cur->pos++) {
944 struct xfs_iext_rec *rec = cur_rec(cur);
946 if (xfs_iext_rec_is_empty(rec))
948 if (xfs_iext_rec_cmp(rec, offset) >= 0)
952 /* Try looking in the next node for an entry > offset */
953 if (ifp->if_height == 1 || !cur->leaf->next)
955 cur->leaf = cur->leaf->next;
957 if (!xfs_iext_valid(ifp, cur))
960 xfs_iext_get(gotp, cur_rec(cur));
965 * Returns the last extent before end, and if this extent doesn't cover
966 * end, update end to the end of the extent.
969 xfs_iext_lookup_extent_before(
970 struct xfs_inode *ip,
971 struct xfs_ifork *ifp,
973 struct xfs_iext_cursor *cur,
974 struct xfs_bmbt_irec *gotp)
976 /* could be optimized to not even look up the next on a match.. */
977 if (xfs_iext_lookup_extent(ip, ifp, *end - 1, cur, gotp) &&
978 gotp->br_startoff <= *end - 1)
980 if (!xfs_iext_prev_extent(ifp, cur, gotp))
982 *end = gotp->br_startoff + gotp->br_blockcount;
987 xfs_iext_update_extent(
988 struct xfs_inode *ip,
990 struct xfs_iext_cursor *cur,
991 struct xfs_bmbt_irec *new)
993 struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
995 xfs_iext_inc_seq(ifp);
998 struct xfs_bmbt_irec old;
1000 xfs_iext_get(&old, cur_rec(cur));
1001 if (new->br_startoff != old.br_startoff) {
1002 xfs_iext_update_node(ifp, old.br_startoff,
1003 new->br_startoff, 1, cur->leaf);
1007 trace_xfs_bmap_pre_update(ip, cur, state, _RET_IP_);
1008 xfs_iext_set(cur_rec(cur), new);
1009 trace_xfs_bmap_post_update(ip, cur, state, _RET_IP_);
1013 * Return true if the cursor points at an extent and return the extent structure
1014 * in gotp. Else return false.
1017 xfs_iext_get_extent(
1018 struct xfs_ifork *ifp,
1019 struct xfs_iext_cursor *cur,
1020 struct xfs_bmbt_irec *gotp)
1022 if (!xfs_iext_valid(ifp, cur))
1024 xfs_iext_get(gotp, cur_rec(cur));
1029 * This is a recursive function, because of that we need to be extremely
1030 * careful with stack usage.
1033 xfs_iext_destroy_node(
1034 struct xfs_iext_node *node,
1040 for (i = 0; i < KEYS_PER_NODE; i++) {
1041 if (node->keys[i] == XFS_IEXT_KEY_INVALID)
1043 xfs_iext_destroy_node(node->ptrs[i], level - 1);
1052 struct xfs_ifork *ifp)
1054 xfs_iext_destroy_node(ifp->if_data, ifp->if_height);
1058 ifp->if_data = NULL;