1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2016 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_defer.h"
14 #include "xfs_btree.h"
16 #include "xfs_refcount_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_errortag.h"
19 #include "xfs_error.h"
20 #include "xfs_trace.h"
21 #include "xfs_trans.h"
23 #include "xfs_refcount.h"
/*
 * NOTE(review): this excerpt appears to have dropped interior source lines
 * throughout (error checks, braces, labels); confirm against the full file.
 */
/* Slab cache backing deferred refcount intent items. */
27 struct kmem_cache *xfs_refcount_intent_cache;
29 /* Allowable refcount adjustment amounts. */
30 enum xfs_refc_adjust_op {
31 XFS_REFCOUNT_ADJUST_INCREASE = 1,
32 XFS_REFCOUNT_ADJUST_DECREASE = -1,
33 XFS_REFCOUNT_ADJUST_COW_ALLOC = 0,
34 XFS_REFCOUNT_ADJUST_COW_FREE = -1,
/* Forward declarations for the CoW staging helpers used by finish_one. */
37 STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
38 xfs_agblock_t agbno, xfs_extlen_t aglen);
39 STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
40 xfs_agblock_t agbno, xfs_extlen_t aglen);
43 * Look up the first record less than or equal to [bno, len] in the btree
47 xfs_refcount_lookup_le(
48 struct xfs_btree_cur *cur,
52 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
/* Seed the cursor's scratch record with the query point, then search LE. */
54 cur->bc_rec.rc.rc_startblock = bno;
55 cur->bc_rec.rc.rc_blockcount = 0;
56 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
60 * Look up the first record greater than or equal to [bno, len] in the btree
64 xfs_refcount_lookup_ge(
65 struct xfs_btree_cur *cur,
69 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
/* Same pattern as lookup_le, but a GE search. */
71 cur->bc_rec.rc.rc_startblock = bno;
72 cur->bc_rec.rc.rc_blockcount = 0;
73 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
77 * Look up the first record equal to [bno, len] in the btree
81 xfs_refcount_lookup_eq(
82 struct xfs_btree_cur *cur,
86 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
/* Exact-match search; *stat tells the caller whether a record was found. */
88 cur->bc_rec.rc.rc_startblock = bno;
89 cur->bc_rec.rc.rc_blockcount = 0;
90 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
93 /* Convert on-disk record to in-core format. */
95 xfs_refcount_btrec_to_irec(
96 const union xfs_btree_rec *rec,
97 struct xfs_refcount_irec *irec)
/* On-disk fields are big-endian; byte-swap each into host order. */
99 irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock);
100 irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
101 irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
105 * Get the data from the pointed-to record.
108 xfs_refcount_get_rec(
109 struct xfs_btree_cur *cur,
110 struct xfs_refcount_irec *irec,
113 struct xfs_mount *mp = cur->bc_mp;
114 struct xfs_perag *pag = cur->bc_ag.pag;
115 union xfs_btree_rec *rec;
117 xfs_agblock_t realstart;
119 error = xfs_btree_get_rec(cur, &rec, stat);
123 xfs_refcount_btrec_to_irec(rec, irec);
/* A zero-length or over-long extent is corruption. */
124 if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
127 /* handle special COW-staging state */
128 realstart = irec->rc_startblock;
/*
 * CoW staging extents have XFS_REFC_COW_START set in the start block and
 * must carry refcount == 1; regular records must have refcount >= 2.
 */
129 if (realstart & XFS_REFC_COW_START) {
130 if (irec->rc_refcount != 1)
132 realstart &= ~XFS_REFC_COW_START;
133 } else if (irec->rc_refcount < 2) {
137 /* check for valid extent range, including overflow */
138 if (!xfs_verify_agbno(pag, realstart))
/* Wraparound check: start + count must not overflow the agblock type. */
140 if (realstart > realstart + irec->rc_blockcount)
142 if (!xfs_verify_agbno(pag, realstart + irec->rc_blockcount - 1))
145 if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
148 trace_xfs_refcount_get(cur->bc_mp, pag->pag_agno, irec);
/* Corruption path: log the bad record, then fail with -EFSCORRUPTED. */
153 "Refcount BTree record corruption in AG %d detected!",
156 "Start block 0x%x, block count 0x%x, references 0x%x",
157 irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
158 return -EFSCORRUPTED;
162 * Update the record referred to by cur to the value given
163 * by [bno, len, refcount].
164 * This either works (return 0) or gets an EFSCORRUPTED error.
168 struct xfs_btree_cur *cur,
169 struct xfs_refcount_irec *irec)
171 union xfs_btree_rec rec;
174 trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
/* Convert the in-core record back to big-endian on-disk form. */
175 rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
176 rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
177 rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
178 error = xfs_btree_update(cur, &rec);
/* Error tracepoint fires on the failure path. */
180 trace_xfs_refcount_update_error(cur->bc_mp,
181 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
186 * Insert the record referred to by cur to the value given
187 * by [bno, len, refcount].
188 * This either works (return 0) or gets an EFSCORRUPTED error.
192 struct xfs_btree_cur *cur,
193 struct xfs_refcount_irec *irec,
198 trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
/* Stage the new record in the cursor, then insert; *i reports success. */
199 cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
200 cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
201 cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
202 error = xfs_btree_insert(cur, i);
/* Insert must report exactly one record added, else corruption. */
205 if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
206 error = -EFSCORRUPTED;
212 trace_xfs_refcount_insert_error(cur->bc_mp,
213 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
218 * Remove the record referred to by cur, then set the pointer to the spot
219 * where the record could be re-inserted, in case we want to increment or
220 * decrement the cursor.
221 * This either works (return 0) or gets an EFSCORRUPTED error.
225 struct xfs_btree_cur *cur,
228 struct xfs_refcount_irec irec;
/* Snapshot the record before deleting so we can re-seek to its position. */
232 error = xfs_refcount_get_rec(cur, &irec, &found_rec);
235 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
236 error = -EFSCORRUPTED;
239 trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec);
240 error = xfs_btree_delete(cur, i);
241 if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
242 error = -EFSCORRUPTED;
/* Reposition the cursor at (or after) the deleted record's start block. */
247 error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
250 trace_xfs_refcount_delete_error(cur->bc_mp,
251 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
256 * Adjusting the Reference Count
258 * As stated elsewhere, the reference count btree (refcbt) stores
259 * >1 reference counts for extents of physical blocks. In this
260 * operation, we're either raising or lowering the reference count of
261 * some subrange stored in the tree:
263 * <------ adjustment range ------>
264 * ----+ +---+-----+ +--+--------+---------
265 * 2 | | 3 | 4 | |17| 55 | 10
266 * ----+ +---+-----+ +--+--------+---------
267 * X axis is physical blocks number;
268 * reference counts are the numbers inside the rectangles
270 * The first thing we need to do is to ensure that there are no
271 * refcount extents crossing either boundary of the range to be
272 * adjusted. For any extent that does cross a boundary, split it into
273 * two extents so that we can increment the refcount of one of the
276 * <------ adjustment range ------>
277 * ----+ +---+-----+ +--+--------+----+----
278 * 2 | | 3 | 2 | |17| 55 | 10 | 10
279 * ----+ +---+-----+ +--+--------+----+----
281 * For this next step, let's assume that all the physical blocks in
282 * the adjustment range are mapped to a file and are therefore in use
283 * at least once. Therefore, we can infer that any gap in the
284 * refcount tree within the adjustment range represents a physical
285 * extent with refcount == 1:
287 * <------ adjustment range ------>
288 * ----+---+---+-----+-+--+--------+----+----
289 * 2 |"1"| 3 | 2 |1|17| 55 | 10 | 10
290 * ----+---+---+-----+-+--+--------+----+----
293 * For each extent that falls within the interval range, figure out
294 * which extent is to the left or the right of that extent. Now we
295 * have a left, current, and right extent. If the new reference count
296 * of the center extent enables us to merge left, center, and right
297 * into one record covering all three, do so. If the center extent is
298 * at the left end of the range, abuts the left extent, and its new
299 * reference count matches the left extent's record, then merge them.
300 * If the center extent is at the right end of the range, abuts the
301 * right extent, and the reference counts match, merge those. In the
302 * example, we can left merge (assuming an increment operation):
304 * <------ adjustment range ------>
305 * --------+---+-----+-+--+--------+----+----
306 * 2 | 3 | 2 |1|17| 55 | 10 | 10
307 * --------+---+-----+-+--+--------+----+----
310 * For all other extents within the range, adjust the reference count
311 * or delete it if the refcount falls below 2. If we were
312 * incrementing, the end result looks like this:
314 * <------ adjustment range ------>
315 * --------+---+-----+-+--+--------+----+----
316 * 2 | 4 | 3 |2|18| 56 | 11 | 10
317 * --------+---+-----+-+--+--------+----+----
319 * The result of a decrement operation looks as such:
321 * <------ adjustment range ------>
322 * ----+ +---+ +--+--------+----+----
323 * 2 | | 2 | |16| 54 | 9 | 10
324 * ----+ +---+ +--+--------+----+----
327 * The blocks marked "D" are freed; the blocks marked "1" are only
328 * referenced once and therefore the record is removed from the
332 /* Next block after this extent. */
333 static inline xfs_agblock_t
335 struct xfs_refcount_irec *rc)
/* First agblock past the end of @rc (exclusive end of the extent). */
337 return rc->rc_startblock + rc->rc_blockcount;
341 * Split a refcount extent that crosses agbno.
344 xfs_refcount_split_extent(
345 struct xfs_btree_cur *cur,
349 struct xfs_refcount_irec rcext, tmp;
353 *shape_changed = false;
/* Find the record at or before agbno; it may straddle agbno. */
354 error = xfs_refcount_lookup_le(cur, agbno, &found_rec);
360 error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
363 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
364 error = -EFSCORRUPTED;
/* No split needed if the record starts at agbno or ends at/before it. */
367 if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
370 *shape_changed = true;
371 trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
374 /* Establish the right extent. */
376 tmp.rc_startblock = agbno;
377 tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
378 error = xfs_refcount_update(cur, &tmp);
382 /* Insert the left extent. */
384 tmp.rc_blockcount = agbno - rcext.rc_startblock;
385 error = xfs_refcount_insert(cur, &tmp, &found_rec);
388 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
389 error = -EFSCORRUPTED;
395 trace_xfs_refcount_split_extent_error(cur->bc_mp,
396 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
401 * Merge the left, center, and right extents.
404 xfs_refcount_merge_center_extents(
405 struct xfs_btree_cur *cur,
406 struct xfs_refcount_irec *left,
407 struct xfs_refcount_irec *center,
408 struct xfs_refcount_irec *right,
409 unsigned long long extlen,
415 trace_xfs_refcount_merge_center_extents(cur->bc_mp,
416 cur->bc_ag.pag->pag_agno, left, center, right);
419 * Make sure the center and right extents are not in the btree.
420 * If the center extent was synthesized, the first delete call
421 * removes the right extent and we skip the second deletion.
422 * If center and right were in the btree, then the first delete
423 * call removes the center and the second one removes the right
426 error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
430 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
431 error = -EFSCORRUPTED;
435 error = xfs_refcount_delete(cur, &found_rec);
438 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
439 error = -EFSCORRUPTED;
/* refcount > 1 means center was a real btree record, so delete right too. */
443 if (center->rc_refcount > 1) {
444 error = xfs_refcount_delete(cur, &found_rec);
447 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
448 error = -EFSCORRUPTED;
453 /* Enlarge the left extent. */
454 error = xfs_refcount_lookup_le(cur, left->rc_startblock,
458 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
459 error = -EFSCORRUPTED;
/* Left extent now covers all three: its length becomes the combined extlen. */
463 left->rc_blockcount = extlen;
464 error = xfs_refcount_update(cur, left);
472 trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
473 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
478 * Merge with the left extent.
481 xfs_refcount_merge_left_extent(
482 struct xfs_btree_cur *cur,
483 struct xfs_refcount_irec *left,
484 struct xfs_refcount_irec *cleft,
485 xfs_agblock_t *agbno,
491 trace_xfs_refcount_merge_left_extent(cur->bc_mp,
492 cur->bc_ag.pag->pag_agno, left, cleft);
494 /* If the extent at agbno (cleft) wasn't synthesized, remove it. */
495 if (cleft->rc_refcount > 1) {
496 error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
500 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
501 error = -EFSCORRUPTED;
505 error = xfs_refcount_delete(cur, &found_rec);
508 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
509 error = -EFSCORRUPTED;
514 /* Enlarge the left extent. */
515 error = xfs_refcount_lookup_le(cur, left->rc_startblock,
519 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
520 error = -EFSCORRUPTED;
524 left->rc_blockcount += cleft->rc_blockcount;
525 error = xfs_refcount_update(cur, left);
/* Shrink the caller's adjustment range past the merged-away cleft. */
529 *agbno += cleft->rc_blockcount;
530 *aglen -= cleft->rc_blockcount;
534 trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
535 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
540 * Merge with the right extent.
543 xfs_refcount_merge_right_extent(
544 struct xfs_btree_cur *cur,
545 struct xfs_refcount_irec *right,
546 struct xfs_refcount_irec *cright,
552 trace_xfs_refcount_merge_right_extent(cur->bc_mp,
553 cur->bc_ag.pag->pag_agno, cright, right);
556 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
559 if (cright->rc_refcount > 1) {
560 error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
564 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
565 error = -EFSCORRUPTED;
569 error = xfs_refcount_delete(cur, &found_rec);
572 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
573 error = -EFSCORRUPTED;
578 /* Enlarge the right extent. */
579 error = xfs_refcount_lookup_le(cur, right->rc_startblock,
583 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
584 error = -EFSCORRUPTED;
/* Right extent grows leftward to absorb cright. */
588 right->rc_startblock -= cright->rc_blockcount;
589 right->rc_blockcount += cright->rc_blockcount;
590 error = xfs_refcount_update(cur, right);
/* Only the length shrinks; agbno is unchanged on a right merge. */
594 *aglen -= cright->rc_blockcount;
598 trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
599 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
/* Flags selecting which kind of records a find_*_extents caller wants. */
603 #define XFS_FIND_RCEXT_SHARED 1
604 #define XFS_FIND_RCEXT_COW 2
606 * Find the left extent and the one after it (cleft). This function assumes
607 * that we've already split any extent crossing agbno.
610 xfs_refcount_find_left_extents(
611 struct xfs_btree_cur *cur,
612 struct xfs_refcount_irec *left,
613 struct xfs_refcount_irec *cleft,
618 struct xfs_refcount_irec tmp;
/* NULLAGBLOCK start marks "no extent found" (see xfs_refc_valid). */
622 left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
623 error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
629 error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
632 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
633 error = -EFSCORRUPTED;
/* Only a record ending exactly at agbno counts as the left neighbor. */
637 if (xfs_refc_next(&tmp) != agbno)
639 if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
641 if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
643 /* We have a left extent; retrieve (or invent) the next right one */
646 error = xfs_btree_increment(cur, 0, &found_rec);
650 error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
653 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
654 error = -EFSCORRUPTED;
658 /* if tmp starts at the end of our range, just use that */
659 if (tmp.rc_startblock == agbno)
663 * There's a gap in the refcntbt at the start of the
664 * range we're interested in (refcount == 1) so
665 * synthesize the implied extent and pass it back.
666 * We assume here that the agbno/aglen range was
667 * passed in from a data fork extent mapping and
668 * therefore is allocated to exactly one owner.
670 cleft->rc_startblock = agbno;
671 cleft->rc_blockcount = min(aglen,
672 tmp.rc_startblock - agbno);
673 cleft->rc_refcount = 1;
677 * No extents, so pretend that there's one covering the whole
680 cleft->rc_startblock = agbno;
681 cleft->rc_blockcount = aglen;
682 cleft->rc_refcount = 1;
684 trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
689 trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
690 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
695 * Find the right extent and the one before it (cright). This function
696 * assumes that we've already split any extents crossing agbno + aglen.
699 xfs_refcount_find_right_extents(
700 struct xfs_btree_cur *cur,
701 struct xfs_refcount_irec *right,
702 struct xfs_refcount_irec *cright,
707 struct xfs_refcount_irec tmp;
/* NULLAGBLOCK start marks "no extent found" (see xfs_refc_valid). */
711 right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
712 error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
718 error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
721 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
722 error = -EFSCORRUPTED;
/* Only a record starting exactly at agbno+aglen is the right neighbor. */
726 if (tmp.rc_startblock != agbno + aglen)
728 if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
730 if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
732 /* We have a right extent; retrieve (or invent) the next left one */
735 error = xfs_btree_decrement(cur, 0, &found_rec);
739 error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
742 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
743 error = -EFSCORRUPTED;
747 /* if tmp ends at the end of our range, just use that */
748 if (xfs_refc_next(&tmp) == agbno + aglen)
752 * There's a gap in the refcntbt at the end of the
753 * range we're interested in (refcount == 1) so
754 * create the implied extent and pass it back.
755 * We assume here that the agbno/aglen range was
756 * passed in from a data fork extent mapping and
757 * therefore is allocated to exactly one owner.
759 cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
760 cright->rc_blockcount = right->rc_startblock -
761 cright->rc_startblock;
762 cright->rc_refcount = 1;
766 * No extents, so pretend that there's one covering the whole
769 cright->rc_startblock = agbno;
770 cright->rc_blockcount = aglen;
771 cright->rc_refcount = 1;
773 trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
774 cright, right, agbno + aglen);
778 trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
779 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
783 /* Is this extent valid? */
786 struct xfs_refcount_irec *rc)
/* find_*_extents uses NULLAGBLOCK as the "not found" sentinel. */
788 return rc->rc_startblock != NULLAGBLOCK;
792 * Try to merge with any extents on the boundaries of the adjustment range.
795 xfs_refcount_merge_extents(
796 struct xfs_btree_cur *cur,
797 xfs_agblock_t *agbno,
799 enum xfs_refc_adjust_op adjust,
803 struct xfs_refcount_irec left = {0}, cleft = {0};
804 struct xfs_refcount_irec cright = {0}, right = {0};
806 unsigned long long ulen;
809 *shape_changed = false;
811 * Find the extent just below agbno [left], just above agbno [cleft],
812 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
815 error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
819 error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
824 /* No left or right extent to merge; exit. */
825 if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
/* cequal: cleft and cright describe the same (single) center extent. */
828 cequal = (cleft.rc_startblock == cright.rc_startblock) &&
829 (cleft.rc_blockcount == cright.rc_blockcount);
831 /* Try to merge left, cleft, and right. cleft must == cright. */
/* ulen is widened to 64 bits so the length-overflow check is safe. */
832 ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
834 if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
835 xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
836 left.rc_refcount == cleft.rc_refcount + adjust &&
837 right.rc_refcount == cleft.rc_refcount + adjust &&
838 ulen < MAXREFCEXTLEN) {
839 *shape_changed = true;
840 return xfs_refcount_merge_center_extents(cur, &left, &cleft,
841 &right, ulen, aglen);
844 /* Try to merge left and cleft. */
845 ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
846 if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
847 left.rc_refcount == cleft.rc_refcount + adjust &&
848 ulen < MAXREFCEXTLEN) {
849 *shape_changed = true;
850 error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
856 * If we just merged left + cleft and cleft == cright,
857 * we no longer have a cright to merge with right. We're done.
863 /* Try to merge cright and right. */
864 ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
865 if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
866 right.rc_refcount == cright.rc_refcount + adjust &&
867 ulen < MAXREFCEXTLEN) {
868 *shape_changed = true;
869 return xfs_refcount_merge_right_extent(cur, &right, &cright,
877 * XXX: This is a pretty hand-wavy estimate. The penalty for guessing
878 * true incorrectly is a shutdown FS; the penalty for guessing false
879 * incorrectly is more transaction rolls than might be necessary.
880 * Be conservative here.
883 xfs_refcount_still_have_space(
884 struct xfs_btree_cur *cur)
886 unsigned long overhead;
889 * Worst case estimate: full splits of the free space and rmap btrees
890 * to handle each of the shape changes to the refcount btree.
892 overhead = xfs_allocfree_block_count(cur->bc_mp,
893 cur->bc_ag.refc.shape_changes);
894 overhead += cur->bc_mp->m_refc_maxlevels;
895 overhead *= cur->bc_mp->m_sb.sb_blocksize;
898 * Only allow 2 refcount extent updates per transaction if the
899 * refcount continue update "error" has been injected.
901 if (cur->bc_ag.refc.nr_ops > 2 &&
902 XFS_TEST_ERROR(false, cur->bc_mp,
903 XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
/* First op of a transaction is always allowed; a too-large overhead is not. */
906 if (cur->bc_ag.refc.nr_ops == 0)
908 else if (overhead > cur->bc_tp->t_log_res)
/* Otherwise: enough log reservation must remain for the queued ops. */
910 return cur->bc_tp->t_log_res - overhead >
911 cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
915 * Adjust the refcounts of middle extents. At this point we should have
916 * split extents that crossed the adjustment range; merged with adjacent
917 * extents; and updated agbno/aglen to reflect the merges. Therefore,
918 * all we have to do is update the extents inside [agbno, agbno + aglen].
921 xfs_refcount_adjust_extents(
922 struct xfs_btree_cur *cur,
923 xfs_agblock_t *agbno,
925 enum xfs_refc_adjust_op adj)
927 struct xfs_refcount_irec ext, tmp;
929 int found_rec, found_tmp;
932 /* Merging did all the work already. */
936 error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
/* Loop until the range is consumed or the transaction runs low on space. */
940 while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
941 error = xfs_refcount_get_rec(cur, &ext, &found_rec);
/* Past the last record: synthesize an end-of-AG sentinel extent. */
945 ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
946 ext.rc_blockcount = 0;
951 * Deal with a hole in the refcount tree; if a file maps to
952 * these blocks and there's no refcountbt record, pretend that
953 * there is one with refcount == 1.
955 if (ext.rc_startblock != *agbno) {
956 tmp.rc_startblock = *agbno;
957 tmp.rc_blockcount = min(*aglen,
958 ext.rc_startblock - *agbno);
/* 1 + adj: 2 on increase (new record), 0 on decrease (free blocks). */
959 tmp.rc_refcount = 1 + adj;
960 trace_xfs_refcount_modify_extent(cur->bc_mp,
961 cur->bc_ag.pag->pag_agno, &tmp);
964 * Either cover the hole (increment) or
965 * delete the range (decrement).
967 cur->bc_ag.refc.nr_ops++;
968 if (tmp.rc_refcount) {
969 error = xfs_refcount_insert(cur, &tmp,
973 if (XFS_IS_CORRUPT(cur->bc_mp,
975 error = -EFSCORRUPTED;
979 fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
980 cur->bc_ag.pag->pag_agno,
/* Decrement of an unshared hole: defer freeing the blocks. */
982 xfs_free_extent_later(cur->bc_tp, fsbno,
983 tmp.rc_blockcount, NULL);
986 (*agbno) += tmp.rc_blockcount;
987 (*aglen) -= tmp.rc_blockcount;
989 error = xfs_refcount_lookup_ge(cur, *agbno,
995 /* Stop if there's nothing left to modify */
996 if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
1000 * Adjust the reference count and either update the tree
1001 * (incr) or free the blocks (decr).
1003 if (ext.rc_refcount == MAXREFCOUNT)
1005 ext.rc_refcount += adj;
1006 trace_xfs_refcount_modify_extent(cur->bc_mp,
1007 cur->bc_ag.pag->pag_agno, &ext);
1008 cur->bc_ag.refc.nr_ops++;
1009 if (ext.rc_refcount > 1) {
1010 error = xfs_refcount_update(cur, &ext);
/* refcount dropped to 1: record is implicit, remove it from the tree. */
1013 } else if (ext.rc_refcount == 1) {
1014 error = xfs_refcount_delete(cur, &found_rec);
1017 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
1018 error = -EFSCORRUPTED;
/* refcount dropped to 0: defer freeing the extent's blocks. */
1023 fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
1024 cur->bc_ag.pag->pag_agno,
1026 xfs_free_extent_later(cur->bc_tp, fsbno,
1027 ext.rc_blockcount, NULL);
1031 error = xfs_btree_increment(cur, 0, &found_rec);
1036 (*agbno) += ext.rc_blockcount;
1037 (*aglen) -= ext.rc_blockcount;
1042 trace_xfs_refcount_modify_extent_error(cur->bc_mp,
1043 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
1047 /* Adjust the reference count of a range of AG blocks. */
1049 xfs_refcount_adjust(
1050 struct xfs_btree_cur *cur,
1051 xfs_agblock_t agbno,
1053 xfs_agblock_t *new_agbno,
1054 xfs_extlen_t *new_aglen,
1055 enum xfs_refc_adjust_op adj)
1058 int shape_changes = 0;
1063 if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
1064 trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1067 trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1071 * Ensure that no rcextents cross the boundary of the adjustment range.
1073 error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
/* Split at both ends of the range so no record straddles a boundary. */
1079 error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
1086 * Try to merge with the left or right extents of the range.
1088 error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
1089 XFS_FIND_RCEXT_SHARED, &shape_changed);
/* Track shape changes so still_have_space() can budget log reservation. */
1095 cur->bc_ag.refc.shape_changes++;
1097 /* Now that we've taken care of the ends, adjust the middle extents */
1098 error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen, adj);
1105 trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1110 /* Clean up after calling xfs_refcount_finish_one. */
1112 xfs_refcount_finish_one_cleanup(
1113 struct xfs_trans *tp,
1114 struct xfs_btree_cur *rcur,
1117 struct xfs_buf *agbp;
/* Grab the AGF buffer before the cursor is torn down, then release it. */
1121 agbp = rcur->bc_ag.agbp;
1122 xfs_btree_del_cursor(rcur, error);
1124 xfs_trans_brelse(tp, agbp);
1128 * Process one of the deferred refcount operations. We pass back the
1129 * btree cursor to maintain our lock on the btree between calls.
1130 * This saves time and eliminates a buffer deadlock between the
1131 * superblock and the AGF because we'll always grab them in the same
1135 xfs_refcount_finish_one(
1136 struct xfs_trans *tp,
1137 enum xfs_refcount_intent_type type,
1138 xfs_fsblock_t startblock,
1139 xfs_extlen_t blockcount,
1140 xfs_fsblock_t *new_fsb,
1141 xfs_extlen_t *new_len,
1142 struct xfs_btree_cur **pcur)
1144 struct xfs_mount *mp = tp->t_mountp;
1145 struct xfs_btree_cur *rcur;
1146 struct xfs_buf *agbp = NULL;
1149 xfs_agblock_t new_agbno;
1150 unsigned long nr_ops = 0;
1151 int shape_changes = 0;
1152 struct xfs_perag *pag;
1154 pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
1155 bno = XFS_FSB_TO_AGBNO(mp, startblock);
1157 trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
1158 type, XFS_FSB_TO_AGBNO(mp, startblock),
/* Error-injection hook for testing the deferred-op retry path. */
1161 if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) {
1167 * If we haven't gotten a cursor or the cursor AG doesn't match
1168 * the startblock, get one now.
/* Carry nr_ops/shape_changes across the cursor swap to keep the budget. */
1171 if (rcur != NULL && rcur->bc_ag.pag != pag) {
1172 nr_ops = rcur->bc_ag.refc.nr_ops;
1173 shape_changes = rcur->bc_ag.refc.shape_changes;
1174 xfs_refcount_finish_one_cleanup(tp, rcur, 0);
1179 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_FREEING,
1184 rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
1185 rcur->bc_ag.refc.nr_ops = nr_ops;
1186 rcur->bc_ag.refc.shape_changes = shape_changes;
/* Dispatch on the intent type; *new_fsb/*new_len report leftover work. */
1191 case XFS_REFCOUNT_INCREASE:
1192 error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
1193 new_len, XFS_REFCOUNT_ADJUST_INCREASE);
1194 *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
1196 case XFS_REFCOUNT_DECREASE:
1197 error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
1198 new_len, XFS_REFCOUNT_ADJUST_DECREASE);
1199 *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
1201 case XFS_REFCOUNT_ALLOC_COW:
1202 *new_fsb = startblock + blockcount;
1204 error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
1206 case XFS_REFCOUNT_FREE_COW:
1207 *new_fsb = startblock + blockcount;
1209 error = __xfs_refcount_cow_free(rcur, bno, blockcount);
1213 error = -EFSCORRUPTED;
1215 if (!error && *new_len > 0)
1216 trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type,
1217 bno, blockcount, new_agbno, *new_len);
1224 * Record a refcount intent for later processing.
1228 struct xfs_trans *tp,
1229 enum xfs_refcount_intent_type type,
1230 xfs_fsblock_t startblock,
1231 xfs_extlen_t blockcount)
1233 struct xfs_refcount_intent *ri;
1235 trace_xfs_refcount_defer(tp->t_mountp,
1236 XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
1237 type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
/* __GFP_NOFAIL: allocation cannot fail, so no error path is needed. */
1240 ri = kmem_cache_alloc(xfs_refcount_intent_cache,
1241 GFP_NOFS | __GFP_NOFAIL);
1242 INIT_LIST_HEAD(&ri->ri_list);
1244 ri->ri_startblock = startblock;
1245 ri->ri_blockcount = blockcount;
/* Queue the intent on the transaction's deferred-ops list. */
1247 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
1251 * Increase the reference count of the blocks backing a file's extent.
1254 xfs_refcount_increase_extent(
1255 struct xfs_trans *tp,
1256 struct xfs_bmbt_irec *PREV)
/* No-op on filesystems without the reflink feature. */
1258 if (!xfs_has_reflink(tp->t_mountp))
1261 __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock,
1262 PREV->br_blockcount);
1266 * Decrease the reference count of the blocks backing a file's extent.
1269 xfs_refcount_decrease_extent(
1270 struct xfs_trans *tp,
1271 struct xfs_bmbt_irec *PREV)
/* No-op on filesystems without the reflink feature. */
1273 if (!xfs_has_reflink(tp->t_mountp))
1276 __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock,
1277 PREV->br_blockcount);
1281 * Given an AG extent, find the lowest-numbered run of shared blocks
1282 * within that range and return the range in fbno/flen. If
1283 * find_end_of_shared is set, return the longest contiguous extent of
1284 * shared blocks; if not, just return the first extent we find. If no
1285 * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
1286 * and 0, respectively.
1289 xfs_refcount_find_shared(
1290 struct xfs_btree_cur *cur,
1291 xfs_agblock_t agbno,
1293 xfs_agblock_t *fbno,
1295 bool find_end_of_shared)
1297 struct xfs_refcount_irec tmp;
1302 trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1305 /* By default, skip the whole range */
1306 *fbno = NULLAGBLOCK;
1309 /* Try to find a refcount extent that crosses the start */
1310 error = xfs_refcount_lookup_le(cur, agbno, &have);
1314 /* No left extent, look at the next one */
1315 error = xfs_btree_increment(cur, 0, &have);
1321 error = xfs_refcount_get_rec(cur, &tmp, &i);
1324 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1325 error = -EFSCORRUPTED;
1329 /* If the extent ends before the start, look at the next one */
1330 if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
1331 error = xfs_btree_increment(cur, 0, &have);
1336 error = xfs_refcount_get_rec(cur, &tmp, &i);
1339 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1340 error = -EFSCORRUPTED;
1345 /* If the extent starts after the range we want, bail out */
1346 if (tmp.rc_startblock >= agbno + aglen)
1349 /* We found the start of a shared extent! */
/* Clip the record to begin at agbno if it starts earlier. */
1350 if (tmp.rc_startblock < agbno) {
1351 tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
1352 tmp.rc_startblock = agbno;
1355 *fbno = tmp.rc_startblock;
1356 *flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
1357 if (!find_end_of_shared)
1360 /* Otherwise, find the end of this shared extent */
/* Extend *flen across physically contiguous records within the range. */
1361 while (*fbno + *flen < agbno + aglen) {
1362 error = xfs_btree_increment(cur, 0, &have);
1367 error = xfs_refcount_get_rec(cur, &tmp, &i);
1370 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1371 error = -EFSCORRUPTED;
/* Stop at a gap or once past the end of the requested range. */
1374 if (tmp.rc_startblock >= agbno + aglen ||
1375 tmp.rc_startblock != *fbno + *flen)
1377 *flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
1381 trace_xfs_refcount_find_shared_result(cur->bc_mp,
1382 cur->bc_ag.pag->pag_agno, *fbno, *flen);
1386 trace_xfs_refcount_find_shared_error(cur->bc_mp,
1387 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
1392 * Recovering CoW Blocks After a Crash
1394 * Due to the way that the copy on write mechanism works, there's a window of
1395 * opportunity in which we can lose track of allocated blocks during a crash.
1396 * Because CoW uses delayed allocation in the in-core CoW fork, writeback
1397 * causes blocks to be allocated and stored in the CoW fork. The blocks are
1398 * no longer in the free space btree but are not otherwise recorded anywhere
1399 * until the write completes and the blocks are mapped into the file. A crash
1400 * in between allocation and remapping results in the replacement blocks being
1401 * lost. This situation is exacerbated by the CoW extent size hint because
1402 * allocations can hang around for a long time.
1404 * However, there is a place where we can record these allocations before they
1405 * become mappings -- the reference count btree. The btree does not record
1406 * extents with refcount == 1, so we can record allocations with a refcount of
1407 * 1. Blocks being used for CoW writeout cannot be shared, so there should be
1408 * no conflict with shared block records. These mappings should be created
1409 * when we allocate blocks to the CoW fork and deleted when they're removed
1410 * from the CoW fork.
1412 * Minor nit: records for in-progress CoW allocations and records for shared
1413 * extents must never be merged, to preserve the property that (except for CoW
1414 * allocations) there are no refcount btree entries with refcount == 1. The
1415 * only time this could potentially happen is when unsharing a block that's
1416 * adjacent to CoW allocations, so we must be careful to avoid this.
1418 * At mount time we recover lost CoW allocations by searching the refcount
1419 * btree for these refcount == 1 mappings. These represent CoW allocations
1420 * that were in progress at the time the filesystem went down, so we can free
1421 * them to get the space back.
1423 * This mechanism is superior to creating EFIs for unmapped CoW extents for
1424 * several reasons -- first, EFIs pin the tail of the log and would have to be
1425 * periodically relogged to avoid filling up the log. Second, CoW completions
1426 * will have to file an EFD and create new EFIs for whatever remains in the
1427 * CoW fork; this partially takes care of (1) but extent-size reservations
1428 * will have to periodically relog even if there's no writeout in progress.
1429 * This can happen if the CoW extent size hint is set, which you really want.
1430 * Third, EFIs cannot currently be automatically relogged into newer
1431 * transactions to advance the log tail. Fourth, stuffing the log full of
1432 * EFIs places an upper bound on the number of CoW allocations that can be
1433 * held filesystem-wide at any given time. Recording them in the refcount
1434 * btree doesn't require us to maintain any state in memory and doesn't pin the log.
1438 * Adjust the refcounts of CoW allocations. These allocations are "magic"
1439 * in that they're not referenced anywhere else in the filesystem, so we
1440 * stash them in the refcount btree with a refcount of 1 until either file
1441 * remapping (or CoW cancellation) happens.
/*
 * Apply one CoW adjustment over [agbno, agbno + aglen) with refcount btree
 * cursor @cur.  @adj is either XFS_REFCOUNT_ADJUST_COW_ALLOC (insert a
 * refcount == 1 staging record) or XFS_REFCOUNT_ADJUST_COW_FREE (delete
 * one).  Returns 0 or a negative errno; on-disk state that does not match
 * the requested adjustment yields -EFSCORRUPTED.
 */
1444 xfs_refcount_adjust_cow_extents(
1445 struct xfs_btree_cur *cur,
1446 xfs_agblock_t agbno,
1448 enum xfs_refc_adjust_op adj)
1450 struct xfs_refcount_irec ext, tmp;
1452 int found_rec, found_tmp;
1457 /* Find any overlapping refcount records */
1458 error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
1461 error = xfs_refcount_get_rec(cur, &ext, &found_rec);
/*
 * If no record was found, synthesize one that starts past the end of the
 * AG's CoW staging area so the range checks below treat the area as empty.
 */
1465 ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
1467 ext.rc_blockcount = 0;
1468 ext.rc_refcount = 0;
1472 case XFS_REFCOUNT_ADJUST_COW_ALLOC:
1473 /* Adding a CoW reservation, there should be nothing here. */
1474 if (XFS_IS_CORRUPT(cur->bc_mp,
1475 agbno + aglen > ext.rc_startblock)) {
1476 error = -EFSCORRUPTED;
/* Stage a refcount == 1 record covering exactly the new reservation. */
1480 tmp.rc_startblock = agbno;
1481 tmp.rc_blockcount = aglen;
1482 tmp.rc_refcount = 1;
1483 trace_xfs_refcount_modify_extent(cur->bc_mp,
1484 cur->bc_ag.pag->pag_agno, &tmp);
1486 error = xfs_refcount_insert(cur, &tmp,
/* The insert must report exactly one new record. */
1490 if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
1491 error = -EFSCORRUPTED;
1495 case XFS_REFCOUNT_ADJUST_COW_FREE:
1496 /* Removing a CoW reservation, there should be one extent. */
1497 if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
1498 error = -EFSCORRUPTED;
1501 if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
1502 error = -EFSCORRUPTED;
1505 if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
1506 error = -EFSCORRUPTED;
/* The record matches the reservation exactly; delete it. */
1510 ext.rc_refcount = 0;
1511 trace_xfs_refcount_modify_extent(cur->bc_mp,
1512 cur->bc_ag.pag->pag_agno, &ext);
1513 error = xfs_refcount_delete(cur, &found_rec);
1516 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
1517 error = -EFSCORRUPTED;
/* Error exit: trace which modification failed and where. */
1527 trace_xfs_refcount_modify_extent_error(cur->bc_mp,
1528 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
1533 * Add or remove refcount btree entries for CoW reservations.
/*
 * @cur:   refcount btree cursor
 * @agbno: start of the reservation in ordinary AG block space; shifted into
 *         the CoW staging range below
 * @adj:   XFS_REFCOUNT_ADJUST_COW_ALLOC to add, ..._COW_FREE to remove
 *
 * Splits any records straddling either end of the range, merges with
 * neighbours where possible, then applies the adjustment to the middle.
 */
1536 xfs_refcount_adjust_cow(
1537 struct xfs_btree_cur *cur,
1538 xfs_agblock_t agbno,
1540 enum xfs_refc_adjust_op adj)
/* CoW staging records are stored offset by XFS_REFC_COW_START. */
1545 agbno += XFS_REFC_COW_START;
1548 * Ensure that no rcextents cross the boundary of the adjustment range.
1550 error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
1554 error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
1559 * Try to merge with the left or right extents of the range.
1561 error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
1562 XFS_FIND_RCEXT_COW, &shape_changed);
1566 /* Now that we've taken care of the ends, adjust the middle extents */
1567 error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
/* Error exit: trace the failed CoW adjustment. */
1574 trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1580 * Record a CoW allocation in the refcount btree.
/* Thin wrapper: trace the increase, then stage the refcount == 1 record. */
1583 __xfs_refcount_cow_alloc(
1584 struct xfs_btree_cur *rcur,
1585 xfs_agblock_t agbno,
1588 trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
1591 /* Add refcount btree reservation */
1592 return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1593 XFS_REFCOUNT_ADJUST_COW_ALLOC);
1597 * Remove a CoW allocation from the refcount btree.
/* Thin wrapper: trace the decrease, then drop the refcount == 1 record. */
1600 __xfs_refcount_cow_free(
1601 struct xfs_btree_cur *rcur,
1602 xfs_agblock_t agbno,
1605 trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
1608 /* Remove refcount btree reservation */
1609 return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1610 XFS_REFCOUNT_ADJUST_COW_FREE);
1613 /* Record a CoW staging extent in the refcount btree. */
/*
 * Queues a deferred XFS_REFCOUNT_ALLOC_COW intent for [fsb, fsb + len) on
 * transaction @tp and adds a matching XFS_RMAP_OWN_COW rmap entry.  Does
 * nothing on filesystems without the reflink feature.
 */
1615 xfs_refcount_alloc_cow_extent(
1616 struct xfs_trans *tp,
1620 struct xfs_mount *mp = tp->t_mountp;
1622 if (!xfs_has_reflink(mp))
1625 __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
1627 /* Add rmap entry */
1628 xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
1629 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1632 /* Forget a CoW staging event in the refcount btree. */
/*
 * Inverse of the staging-extent record above: drops the XFS_RMAP_OWN_COW
 * rmap entry first, then queues the XFS_REFCOUNT_FREE_COW intent for
 * [fsb, fsb + len).  Does nothing on filesystems without reflink.
 */
1634 xfs_refcount_free_cow_extent(
1635 struct xfs_trans *tp,
1639 struct xfs_mount *mp = tp->t_mountp;
1641 if (!xfs_has_reflink(mp))
1644 /* Remove rmap entry */
1645 xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
1646 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1647 __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
/* One leftover CoW staging extent collected during mount-time recovery. */
1650 struct xfs_refcount_recovery {
/* Link in the caller's debris list. */
1651 struct list_head rr_list;
/* Copy of the refcount btree record describing the extent. */
1652 struct xfs_refcount_irec rr_rrec;
1655 /* Stuff an extent on the recovery list. */
/*
 * xfs_btree_query_range callback: every record found in the CoW staging
 * range must carry refcount == 1; copy it into a freshly allocated
 * xfs_refcount_recovery node and append it to the list passed via @priv.
 * NOTE(review): the kmem_alloc() result is not NULL-checked; presumably a
 * flags value of 0 means the allocation sleeps until it succeeds -- confirm.
 */
1657 xfs_refcount_recover_extent(
1658 struct xfs_btree_cur *cur,
1659 const union xfs_btree_rec *rec,
1662 struct list_head *debris = priv;
1663 struct xfs_refcount_recovery *rr;
/* CoW staging extents are never shared, so refcount must be exactly 1. */
1665 if (XFS_IS_CORRUPT(cur->bc_mp,
1666 be32_to_cpu(rec->refc.rc_refcount) != 1))
1667 return -EFSCORRUPTED;
1669 rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
1670 xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
1671 list_add_tail(&rr->rr_list, debris);
1676 /* Find and remove leftover CoW reservations. */
/*
 * Two-phase mount-time cleanup for AG @pag: (1) walk the refcount btree's
 * CoW staging range under an empty transaction and collect each leftover
 * record on a local debris list; (2) for each collected record, in its own
 * write transaction, remove the staging record and rmap entry and schedule
 * the blocks for freeing.  Returns 0 or a negative errno.
 */
1678 xfs_refcount_recover_cow_leftovers(
1679 struct xfs_mount *mp,
1680 struct xfs_perag *pag)
1682 struct xfs_trans *tp;
1683 struct xfs_btree_cur *cur;
1684 struct xfs_buf *agbp;
1685 struct xfs_refcount_recovery *rr, *n;
1686 struct list_head debris;
1687 union xfs_btree_irec low;
1688 union xfs_btree_irec high;
1690 xfs_agblock_t agbno;
/*
 * NOTE(review): staging records are encoded at agbno + XFS_REFC_COW_START;
 * an AG with that many real blocks would collide with the encoding, which
 * looks like the reason for this bail-out -- confirm against the header.
 */
1693 if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
1696 INIT_LIST_HEAD(&debris);
1699 * In this first part, we use an empty transaction to gather up
1700 * all the leftover CoW extents so that we can subsequently
1701 * delete them. The empty transaction is used to avoid
1702 * a buffer lock deadlock if there happens to be a loop in the
1703 * refcountbt because we're allowed to re-grab a buffer that is
1704 * already attached to our transaction. When we're done
1705 * recording the CoW debris we cancel the (empty) transaction
1706 * and everything goes away cleanly.
1708 error = xfs_trans_alloc_empty(mp, &tp);
1712 error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
1715 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
1717 /* Find all the leftover CoW staging extents. */
1718 memset(&low, 0, sizeof(low))
1719 memset(&high, 0, sizeof(high));
1720 low.rc.rc_startblock = XFS_REFC_COW_START;
1721 high.rc.rc_startblock = -1U;
1722 error = xfs_btree_query_range(cur, &low, &high,
1723 xfs_refcount_recover_extent, &debris);
/* Done with the btree walk: release cursor, AGF buffer and transaction. */
1724 xfs_btree_del_cursor(cur, error);
1725 xfs_trans_brelse(tp, agbp);
1726 xfs_trans_cancel(tp);
1730 /* Now iterate the list to free the leftovers */
1731 list_for_each_entry_safe(rr, n, &debris, rr_list) {
1732 /* Set up transaction. */
1733 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1737 trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
1740 /* Free the orphan record */
/* Undo the XFS_REFC_COW_START offset to get the real AG block number. */
1741 agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
1742 fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno);
1743 xfs_refcount_free_cow_extent(tp, fsb,
1744 rr->rr_rrec.rc_blockcount);
1746 /* Free the block. */
1747 xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
1749 error = xfs_trans_commit(tp);
1753 list_del(&rr->rr_list);
/* Error path: cancel the in-flight transaction before draining the list. */
1759 xfs_trans_cancel(tp);
1761 /* Free the leftover list */
1762 list_for_each_entry_safe(rr, n, &debris, rr_list) {
1763 list_del(&rr->rr_list);
1769 /* Is there a record covering a given extent? */
/*
 * Sets *exists if any refcount record overlaps [bno, bno + len).  The low
 * key is zeroed except for its start block; the high key is saturated to
 * all-ones except for its start block, so the whole key range is covered.
 */
1771 xfs_refcount_has_record(
1772 struct xfs_btree_cur *cur,
1777 union xfs_btree_irec low;
1778 union xfs_btree_irec high;
1780 memset(&low, 0, sizeof(low));
1781 low.rc.rc_startblock = bno;
1782 memset(&high, 0xFF, sizeof(high));
1783 high.rc.rc_startblock = bno + len - 1;
1785 return xfs_btree_has_record(cur, &low, &high, exists);
/* Create the slab cache for refcount intent items; 0 on success, -ENOMEM on failure. */
1789 xfs_refcount_intent_init_cache(void)
1791 xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
1792 sizeof(struct xfs_refcount_intent),
1795 return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
/* Destroy the refcount intent slab cache and clear the global pointer. */
1799 xfs_refcount_intent_destroy_cache(void)
1801 kmem_cache_destroy(xfs_refcount_intent_cache);
1802 xfs_refcount_intent_cache = NULL;