// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
struct kmem_cache	*xfs_extfree_item_cache;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2
/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_has_crc(mp))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
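
/*
 * Worked example (illustrative, not part of the original source): on a V5
 * filesystem with 512-byte sectors, the AGFL header (magic, seqno, uuid,
 * lsn, crc) occupies 36 bytes, leaving (512 - 36) / 4 = 119 slots for
 * 32-bit AG block numbers.  Without the CRC header the same sector holds
 * 512 / 4 = 128 slots.
 */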
unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_has_reflink(mp))
		return xfs_refc_block(mp) + 1;
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}
/*
 * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
 * guarantee that we can refill the AGFL prior to allocating space in a nearly
 * full AG.  Although the space described by the free space btrees, the
 * blocks used by the freesp btrees themselves, and the blocks owned by the
 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
 * free space in the AG drop so low that the free space btrees cannot refill an
 * empty AGFL up to the minimum level.  Rather than grind through empty AGs
 * until the fs goes down, we subtract this many AG blocks from the incore
 * fdblocks to ensure user allocation does not overcommit the space the
 * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
 * withhold space from xfs_mod_fdblocks, so we do not account for that here.
 */
#define XFS_ALLOCBT_AGFL_RESERVE	4
/*
 * Compute the number of blocks that we set aside to guarantee the ability to
 * refill the AGFL and handle a full bmap btree split.
 *
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks.  However, these restrictions may result in no actual space
 * allocated for a delayed extent; for example, a data block in a certain AG is
 * allocated but there is no additional block for the additional bmap btree
 * block due to a split of the bmap btree of the file.  The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated.  To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * For each AG, we need to reserve enough blocks to replenish a totally empty
 * AGFL and 4 more to handle a potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
}
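
/*
 * Worked example (illustrative, not part of the original source): a
 * filesystem with sb_agcount = 4 sets aside
 * 4 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 4 * 8 = 32 blocks that are never
 * handed out via delayed allocation.
 */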
/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG.  However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata.  These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry.  The others are all single blocks.
 */
xfs_extlen_t
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOCBT_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_has_finobt(mp))
		blocks++;		/* finobt root block */
	if (xfs_has_rmapbt(mp))
		blocks++;		/* rmap root block */
	if (xfs_has_reflink(mp))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
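
/*
 * Worked example (illustrative, not part of the original source): with
 * 4096-byte blocks and 512-byte sectors, the four sector-sized AG headers
 * round up to a single filesystem block.  On a fully featured V5
 * filesystem (finobt, rmapbt and reflink enabled) that gives
 * 1 + 4 + 3 + 1 + 1 + 1 = 11 blocks subtracted from sb_agblocks.
 */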
/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}
/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}
/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}
static inline bool
xfs_alloc_cur_active(
	struct xfs_btree_cur	*cur)
{
	return cur && cur->bc_ag.abt.active;
}
/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}
/* Convert the ondisk btree record to its incore representation. */
void
xfs_alloc_btrec_to_irec(
	const union xfs_btree_rec	*rec,
	struct xfs_alloc_rec_incore	*irec)
{
	irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
	irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
}
/* Simple checks for free space records. */
xfs_failaddr_t
xfs_alloc_check_irec(
	struct xfs_perag			*pag,
	const struct xfs_alloc_rec_incore	*irec)
{
	if (irec->ar_blockcount == 0)
		return __this_address;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
		return __this_address;

	return NULL;
}
static inline int
xfs_alloc_complain_bad_rec(
	struct xfs_btree_cur		*cur,
	xfs_failaddr_t			fa,
	const struct xfs_alloc_rec_incore *irec)
{
	struct xfs_mount		*mp = cur->bc_mp;

	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected at %pS!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
		cur->bc_ag.pag->pag_agno, fa);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", irec->ar_startblock,
		irec->ar_blockcount);
	return -EFSCORRUPTED;
}
/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_alloc_rec_incore irec;
	union xfs_btree_rec	*rec;
	xfs_failaddr_t		fa;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	xfs_alloc_btrec_to_irec(rec, &irec);
	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
	if (fa)
		return xfs_alloc_complain_bad_rec(cur, fa, &irec);

	*bno = irec.ar_startblock;
	*len = irec.ar_blockcount;
	return 0;
}
/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
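
/*
 * Worked example (illustrative, not part of the original source): with
 * args->alignment = 4 and a trimmed extent starting at bno = 10 with
 * len = 20, aligned_bno = roundup(10, 4) = 12 and diff = 2, so the aligned
 * result is [12, 18].  If diff were >= len, the usable length would
 * collapse to zero.
 */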
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = datatype & XFS_ALLOC_USERDATA;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
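
/*
 * Worked example (illustrative, not part of the original source):
 * wantbno = 100, wantlen = 10, alignment = 1, free extent [90, 30].
 * freebno < wantbno and freeend = 120 >= wantend = 110, so
 * newbno1 = wantbno = 100 and the returned diff is 0.  For a free extent
 * [80, 15], freeend = 95 < wantend, so the allocation is pushed to
 * newbno1 = freeend - wantlen = 85, giving diff = 15.
 */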
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
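
/*
 * Worked example (illustrative, not part of the original source):
 * prod = 4, mod = 1, len = 14.  k = 14 % 4 = 2 > mod, so
 * rlen = 14 - (2 - 1) = 13, and 13 % 4 == 1 as required.  With k < mod,
 * e.g. len = 12, k = 0 and rlen = 12 - 4 + (1 - 0) = 9, which again
 * satisfies 9 % 4 == 1.
 */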
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);

		if (XFS_IS_CORRUPT(mp,
				   bnoblock->bb_numrecs !=
				   cntblock->bb_numrecs))
			return -EFSCORRUPTED;
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	return 0;
}
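
/*
 * Worked example (illustrative, not part of the original source): freeing
 * record [fbno = 100, flen = 10] of an allocation [rbno = 103, rlen = 4]
 * taken from its middle leaves two new free extents,
 * nfbno1/nflen1 = [100, 3] and nfbno2/nflen2 = [107, 3], so one by-size
 * record is deleted and two are inserted, while the by-block record is
 * updated in place and a second one inserted.
 */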
/*
 * We do not verify the AGFL contents against AGF-based index counters here,
 * even though we may have access to the perag that contains shadow copies. We
 * don't know if the AGF based counters have been checked, and if they have they
 * still may be inconsistent because they haven't yet been reset on the first
 * allocation after the AGF has been read in.
 *
 * This means we can only check that all agfl entries contain valid or null
 * values because we can't reliably determine the active range to exclude
 * NULLAGBNO as a valid value.
 *
 * However, we can't even do that for v4 format filesystems because there are
 * old versions of mkfs out there that do not initialise the AGFL to known,
 * verifiable values. Hence we can't tell the difference between an AGFL block
 * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
 *
 * As a result, we can only fully validate AGFL block numbers when we pull them
 * from the freelist in xfs_alloc_get_freelist().
 */
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
	int		i;

	if (!xfs_has_crc(mp))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}
static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_has_crc(mp))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}
static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_buf_log_item *bip = bp->b_log_item;
	xfs_failaddr_t	fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_has_crc(mp))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}
const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};
/*
 * Read in the allocation group free block array.
 */
int
xfs_alloc_read_agfl(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = agbp->b_addr;

	agbp->b_pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length))) {
		xfs_buf_mark_corrupt(agbp);
		return -EFSCORRUPTED;
	}

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
/*
 * Block allocation algorithm and data structures.
 */
struct xfs_alloc_cur {
	struct xfs_btree_cur		*cnt;	/* btree cursors */
	struct xfs_btree_cur		*bnolt;
	struct xfs_btree_cur		*bnogt;
	xfs_extlen_t			cur_len;/* current search length */
	xfs_agblock_t			rec_bno;/* extent startblock */
	xfs_extlen_t			rec_len;/* extent length */
	xfs_agblock_t			bno;	/* alloc bno */
	xfs_extlen_t			len;	/* alloc len */
	xfs_extlen_t			diff;	/* diff from search bno */
	unsigned int			busy_gen;/* busy state */
	bool				busy;
};
/*
 * Set up cursors, etc. in the extent allocation cursor. This function can be
 * called multiple times to reset an initialized structure without having to
 * reallocate cursors.
 */
static int
xfs_alloc_cur_setup(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	int			error;
	int			i;

	acur->cur_len = args->maxlen;
	acur->rec_bno = 0;
	acur->rec_len = 0;
	acur->bno = 0;
	acur->len = 0;
	acur->diff = -1;
	acur->busy = false;
	acur->busy_gen = 0;

	/*
	 * Perform an initial cntbt lookup to check for availability of maxlen
	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
	 * attempt a small allocation.
	 */
	if (!acur->cnt)
		acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_CNT);
	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
	if (error)
		return error;

	/*
	 * Allocate the bnobt left and right search cursors.
	 */
	if (!acur->bnolt)
		acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	if (!acur->bnogt)
		acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	return i == 1 ? 0 : -ENOSPC;
}
static void
xfs_alloc_cur_close(
	struct xfs_alloc_cur	*acur,
	bool			error)
{
	int			cur_error = XFS_BTREE_NOERROR;

	if (error)
		cur_error = XFS_BTREE_ERROR;

	if (acur->cnt)
		xfs_btree_del_cursor(acur->cnt, cur_error);
	if (acur->bnolt)
		xfs_btree_del_cursor(acur->bnolt, cur_error);
	if (acur->bnogt)
		xfs_btree_del_cursor(acur->bnogt, cur_error);
	acur->cnt = acur->bnolt = acur->bnogt = NULL;
}
/*
 * Check an extent for allocation and track the best available candidate in the
 * allocation structure. The cursor is deactivated if it has entered an out of
 * range state based on allocation arguments. Optionally return the extent
 * geometry and allocation status if requested by the caller.
 */
static int
xfs_alloc_cur_check(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	int			*new)
{
	int			error, i;
	xfs_agblock_t		bno, bnoa, bnew;
	xfs_extlen_t		len, lena, diff = -1;
	bool			busy;
	unsigned		busy_gen = 0;
	bool			deactivate = false;
	bool			isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;

	*new = 0;

	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(args->mp, i != 1))
		return -EFSCORRUPTED;

	/*
	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
	 * range (i.e., walking backwards looking for a minlen extent).
	 */
	if (len < args->minlen) {
		deactivate = !isbnobt;
		goto out;
	}

	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
					 &busy_gen);
	acur->busy |= busy;
	if (busy)
		acur->busy_gen = busy_gen;
	/* deactivate a bnobt cursor outside of locality range */
	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
		deactivate = isbnobt;
		goto out;
	}
	if (lena < args->minlen)
		goto out;

	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
	xfs_alloc_fix_len(args);
	ASSERT(args->len >= args->minlen);
	if (args->len < acur->len)
		goto out;

	/*
	 * We have an aligned record that satisfies minlen and beats or matches
	 * the candidate extent size. Compare locality for near allocation mode.
	 */
	diff = xfs_alloc_compute_diff(args->agbno, args->len,
				      args->alignment, args->datatype,
				      bnoa, lena, &bnew);
	if (bnew == NULLAGBLOCK)
		goto out;

	/*
	 * Deactivate a bnobt cursor with worse locality than the current best.
	 */
	if (diff > acur->diff) {
		deactivate = isbnobt;
		goto out;
	}

	ASSERT(args->len > acur->len ||
	       (args->len == acur->len && diff <= acur->diff));
	acur->rec_bno = bno;
	acur->rec_len = len;
	acur->bno = bnew;
	acur->len = args->len;
	acur->diff = diff;
	*new = 1;

	/*
	 * We're done if we found a perfect allocation. This only deactivates
	 * the current cursor, but this is just an optimization to terminate a
	 * cntbt search that otherwise runs to the edge of the tree.
	 */
	if (acur->diff == 0 && acur->len == args->maxlen)
		deactivate = true;
out:
	if (deactivate)
		cur->bc_ag.abt.active = false;
	trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
				  *new);
	return 0;
}
/*
 * Complete an allocation of a candidate extent. Remove the extent from both
 * trees and update the args structure.
 */
STATIC int
xfs_alloc_cur_finish(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	int			error;

	ASSERT(acur->cnt && acur->bnolt);
	ASSERT(acur->bno >= acur->rec_bno);
	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
	ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));

	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
				      acur->rec_len, acur->bno, acur->len, 0);
	if (error)
		return error;

	args->agbno = acur->bno;
	args->len = acur->len;
	args->wasfromfl = 0;

	trace_xfs_alloc_cur(args);
	return 0;
}
/*
 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
 * bno optimized lookup to search for extents with ideal size and locality.
 */
STATIC int
xfs_alloc_cntbt_iter(
	struct xfs_alloc_arg		*args,
	struct xfs_alloc_cur		*acur)
{
	struct xfs_btree_cur	*cur = acur->cnt;
	xfs_agblock_t		bno;
	xfs_extlen_t		len, cur_len;
	int			error;
	int			i;

	if (!xfs_alloc_cur_active(cur))
		return 0;

	/* locality optimized lookup */
	cur_len = acur->cur_len;
	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
	if (error)
		return error;
	if (i == 0)
		return 0;
	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;

	/* check the current record and update search length from it */
	error = xfs_alloc_cur_check(args, acur, cur, &i);
	if (error)
		return error;
	ASSERT(len >= acur->cur_len);
	acur->cur_len = len;

	/*
	 * We looked up the first record >= [agbno, len] above. The agbno is a
	 * secondary key and so the current record may lie just before or after
	 * agbno. If it is past agbno, check the previous record too so long as
	 * the length matches as it may be closer. Don't check a smaller record
	 * because that could deactivate our cursor.
	 */
	if (bno > args->agbno) {
		error = xfs_btree_decrement(cur, 0, &i);
		if (!error && i) {
			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
			if (!error && i && len == acur->cur_len)
				error = xfs_alloc_cur_check(args, acur, cur,
							    &i);
		}
		if (error)
			return error;
	}

	/*
	 * Increment the search key until we find at least one allocation
	 * candidate or if the extent we found was larger. Otherwise, double the
	 * search key to optimize the search. Efficiency is more important here
	 * than absolute best locality.
	 */
	cur_len <<= 1;
	if (!acur->len || acur->cur_len >= cur_len)
		acur->cur_len++;
	else
		acur->cur_len = cur_len;

	return error;
}
/*
 * Deal with the case where only small freespaces remain. Either return the
 * contents of the last freespace record, or allocate space from the freelist if
 * there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
	xfs_agblock_t		*fbnop,	/* result block number */
	xfs_extlen_t		*flenp,	/* result length */
	int			*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	int			error = 0;
	xfs_agblock_t		fbno = NULLAGBLOCK;
	xfs_extlen_t		flen = 0;
	int			i = 0;

	/*
	 * If a cntbt cursor is provided, try to allocate the largest record in
	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
	 * allocation. Make sure to respect minleft even when pulling from the
	 * freelist.
	 */
	if (ccur)
		error = xfs_btree_decrement(ccur, 0, &i);
	if (error)
		goto error;
	if (i) {
		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}
		goto out;
	}

	if (args->minlen != 1 || args->alignment != 1 ||
	    args->resv == XFS_AG_RESV_AGFL ||
	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
		goto out;

	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
			&fbno, 0);
	if (error)
		goto error;
	if (fbno == NULLAGBLOCK)
		goto out;

	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
			      (args->datatype & XFS_ALLOC_NOBUSY));

	if (args->datatype & XFS_ALLOC_USERDATA) {
		struct xfs_buf	*bp;

		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
				args->mp->m_bsize, 0, &bp);
		if (error)
			goto error;
		xfs_trans_binval(args->tp, bp);
	}
	*fbnop = args->agbno = fbno;
	*flenp = args->len = 1;
	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error;
	}
	args->wasfromfl = 1;
	trace_xfs_alloc_small_freelist(args);

	/*
	 * If we're feeding an AGFL block to something that doesn't live in the
	 * free space, we need to clear out the OWN_AG rmap.
	 */
	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
			      &XFS_RMAP_OINFO_AG);
	if (error)
		goto error;

	*stat = 0;
	return 0;

out:
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error:
	trace_xfs_alloc_small_error(args);
	return error;
}
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
	struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->pag, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
/*
 * Search a given number of btree records in a given direction. Check each
 * record against the good extent we've already found.
 */
STATIC int
xfs_alloc_walk_iter(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	bool			increment,
	bool			find_one, /* quit on first candidate */
	int			count,    /* rec count (-1 for infinite) */
	int			*stat)
{
	int			error;
	int			i;

	*stat = 0;

	/*
	 * Search so long as the cursor is active or we find a better extent.
	 * The cursor is deactivated if it extends beyond the range of the
	 * current allocation candidate.
	 */
	while (xfs_alloc_cur_active(cur) && count) {
		error = xfs_alloc_cur_check(args, acur, cur, &i);
		if (error)
			return error;
		if (i == 1) {
			*stat = 1;
			if (find_one)
				break;
		}
		if (!xfs_alloc_cur_active(cur))
			break;

		if (increment)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			return error;
		if (i == 0)
			cur->bc_ag.abt.active = false;

		if (count > 0)
			count--;
	}

	return 0;
}
/*
 * Search the by-bno and by-size btrees in parallel in search of an extent with
 * ideal locality based on the NEAR mode ->agbno locality hint.
 */
STATIC int
xfs_alloc_ag_vextent_locality(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	int			*stat)
{
	struct xfs_btree_cur	*fbcur = NULL;
	int			error;
	int			i;
	bool			fbinc;

	ASSERT(acur->len == 0);

	*stat = 0;

	error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
	if (error)
		return error;

	/*
	 * Search the bnobt and cntbt in parallel. Search the bnobt left and
	 * right and lookup the closest extent to the locality hint for each
	 * extent size key in the cntbt. The entire search terminates
	 * immediately on a bnobt hit because that means we've found best case
	 * locality. Otherwise the search continues until the cntbt cursor runs
	 * off the end of the tree. If no allocation candidate is found at this
	 * point, give up on locality, walk backwards from the end of the cntbt
	 * and take the first available extent.
	 *
	 * The parallel tree searches balance each other out to provide fairly
	 * consistent performance for various situations. The bnobt search can
	 * have pathological behavior in the worst case scenario of larger
	 * allocation requests and fragmented free space. On the other hand, the
	 * bnobt is able to satisfy most smaller allocation requests much more
	 * quickly than the cntbt. The cntbt search can sift through fragmented
	 * free space and sets of free extents for larger allocation requests
	 * more quickly than the bnobt. Since the locality hint is just a hint
	 * and we don't want to scan the entire bnobt for perfect locality, the
	 * cntbt search essentially bounds the bnobt search such that we can
	 * find good enough locality at reasonable performance in most cases.
	 */
	while (xfs_alloc_cur_active(acur->bnolt) ||
	       xfs_alloc_cur_active(acur->bnogt) ||
	       xfs_alloc_cur_active(acur->cnt)) {

		trace_xfs_alloc_cur_lookup(args);

		/*
		 * Search the bnobt left and right. In the case of a hit, finish
		 * the search in the opposite direction and we're done.
		 */
		error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
					    true, 1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_left(args);
			fbcur = acur->bnogt;
			fbinc = true;
			break;
		}
		error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
					    1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_right(args);
			fbcur = acur->bnolt;
			fbinc = false;
			break;
		}

		/*
		 * Check the extent with best locality based on the current
		 * extent size search key and keep track of the best candidate.
		 */
		error = xfs_alloc_cntbt_iter(args, acur);
		if (error)
			return error;
		if (!xfs_alloc_cur_active(acur->cnt)) {
			trace_xfs_alloc_cur_lookup_done(args);
			break;
		}
	}

	/*
	 * If we failed to find anything due to busy extents, return empty
	 * handed so the caller can flush and retry. If no busy extents were
	 * found, walk backwards from the end of the cntbt as a last resort.
	 */
	if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
		error = xfs_btree_decrement(acur->cnt, 0, &i);
		if (error)
			return error;
		if (i) {
			acur->cnt->bc_ag.abt.active = true;
			fbcur = acur->cnt;
			fbinc = false;
		}
	}

	/*
	 * Search in the opposite direction for a better entry in the case of
	 * a bnobt hit or walk backwards from the end of the cntbt.
	 */
	if (fbcur) {
		error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
					    &i);
		if (error)
			return error;
	}

	if (acur->len)
		*stat = 1;

	return 0;
}
/* Check the last block of the cnt btree for allocations. */
static int
xfs_alloc_ag_vextent_lastblock(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	bool			*allocated)
{
	int			error;
	int			i;

#ifdef DEBUG
	/* Randomly don't execute the first algorithm. */
	if (get_random_u32_below(2))
		return 0;
#endif

	/*
	 * Start from the entry that lookup found, sequence through all larger
	 * free blocks.  If we're actually pointing at a record smaller than
	 * maxlen, go to the start of this block, and skip all those smaller
	 * than minlen.
	 */
	if (*len || args->alignment > 1) {
		acur->cnt->bc_levels[0].ptr = 1;
		do {
			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(args->mp, i != 1))
				return -EFSCORRUPTED;
			if (*len >= args->minlen)
				break;
			error = xfs_btree_increment(acur->cnt, 0, &i);
			if (error)
				return error;
		} while (i);
		ASSERT(*len >= args->minlen);
		if (!i)
			return 0;
	}

	error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
	if (error)
		return error;

	/*
	 * It didn't work.  We COULD be in a case where there's a good record
	 * somewhere, so try again.
	 */
	if (acur->len == 0)
		return 0;

	trace_xfs_alloc_near_first(args);
	*allocated = true;
	return 0;
}
/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int
xfs_alloc_ag_vextent_near(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{
	struct xfs_alloc_cur	acur = {};
	int			error;		/* error code */
	int			i;		/* result code, temporary */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

	/* Retry once quickly if we find busy extents before blocking. */
	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
	len = 0;

	/*
	 * Set up cursors and see if there are any free extents as big as
	 * maxlen. If not, pick the last entry in the tree unless the tree is
	 * empty.
	 */
	error = xfs_alloc_cur_setup(args, &acur);
	if (error == -ENOSPC) {
		error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
				&len, &i);
		if (error)
			goto out;
		if (i == 0 || len == 0) {
			trace_xfs_alloc_near_noentry(args);
			goto out;
		}
		ASSERT(len >= args->minlen);
	} else if (error) {
		goto out;
	}

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 */
	if (xfs_btree_islastblock(acur.cnt, 0)) {
		bool		allocated = false;

		error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
				&allocated);
		if (error)
			goto out;
		if (allocated)
			goto alloc_finish;
	}

	/*
	 * Second algorithm. Combined cntbt and bnobt search to find ideal
	 * locality.
	 */
	error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
	if (error)
		goto out;

	/*
	 * If we couldn't get anything, give up.
	 */
	if (!acur.len) {
		if (acur.busy) {
			/*
			 * Our only valid extents must have been busy. Flush and
			 * retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_near_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					acur.busy_gen, alloc_flags);
			if (error)
				goto out;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		goto out;
	}

alloc_finish:
	/* fix up btrees on a successful allocation */
	error = xfs_alloc_cur_finish(args, &acur);

out:
	xfs_alloc_cur_close(&acur, error);
	return error;
}
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
static int
xfs_alloc_ag_vextent_size(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	struct xfs_btree_cur	*bno_cur;
	struct xfs_btree_cur	*cnt_cur;
	xfs_agblock_t		fbno;		/* start of found freespace */
	xfs_extlen_t		flen;		/* length of found freespace */
	xfs_agblock_t		rbno;		/* returned block number */
	xfs_extlen_t		rlen;		/* length of returned extent */
	bool			busy;
	unsigned		busy_gen;
	int			error;
	int			i;

	/* Retry once quickly if we find busy extents before blocking. */
	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case that
	 * there are no large extents, this will return the last entry in the
	 * tree unless the tree is empty. In the case that there are only busy
	 * large extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i)
				continue;

			/*
			 * Our only valid extents must have been busy. Flush and
			 * retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_size_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					busy_gen, alloc_flags);
			if (error)
				goto error0;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			goto restart;
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	if (XFS_IS_CORRUPT(args->mp,
			   rlen != 0 &&
			   (rlen > flen ||
			    rbno + rlen > fbno + flen))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			if (XFS_IS_CORRUPT(args->mp,
					   rlen != 0 &&
					   (rlen > flen ||
					    rbno + rlen > fbno + flen))) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			/*
			 * Our only valid extents must have been busy. Flush and
			 * retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_size_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					busy_gen, alloc_flags);
			if (error)
				goto error0;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	if (XFS_IS_CORRUPT(args->mp,
			   args->agbno + args->len >
			   be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
/*
 * Free the extent starting at agno/bno for length.
 */
STATIC int
xfs_free_ag_extent(
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xfs_mount		*mp;
	struct xfs_btree_cur		*bno_cur;
	struct xfs_btree_cur		*cnt_cur;
	xfs_agblock_t			gtbno; /* start of right neighbor */
	xfs_extlen_t			gtlen; /* length of right neighbor */
	xfs_agblock_t			ltbno; /* start of left neighbor */
	xfs_extlen_t			ltlen; /* length of left neighbor */
	xfs_agblock_t			nbno; /* new starting block of freesp */
	xfs_extlen_t			nlen; /* new length of freespace */
	int				haveleft; /* have a left neighbor */
	int				haveright; /* have a right neighbor */
	int				i;
	int				error;
	struct xfs_perag		*pag = agbp->b_pag;

	bno_cur = cnt_cur = NULL;
	mp = tp->t_mountp;

	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
		error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
		if (error)
			goto error0;
	}

	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			if (XFS_IS_CORRUPT(mp,
					   i != 1 ||
					   xxbno != ltbno ||
					   xxlen != ltlen)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 0)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	error = xfs_alloc_update_counters(tp, agbp, len);
	xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
	if (error)
		goto error0;

	XFS_STATS_INC(mp, xs_freex);
	XFS_STATS_ADD(mp, xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);

	return 0;

 error0:
	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_alloc_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
			(mp->m_sb.sb_agblocks + 1) / 2);
	ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
}
/*
 * Find the length of the longest extent in an AG.  The 'need' parameter
 * specifies how much space we're going to need for the AGFL and the
 * 'reserved' parameter tells us how many blocks in this AG are reserved for
 * other callers.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_perag	*pag,
	xfs_extlen_t		need,
	xfs_extlen_t		reserved)
{
	xfs_extlen_t		delta = 0;

	/*
	 * If the AGFL needs a recharge, we'll have to subtract that from the
	 * longest extent.
	 */
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	/*
	 * If we cannot maintain others' reservations with space from the
	 * not-longest freesp extents, we'll have to subtract /that/ from
	 * the longest extent too.
	 */
	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);

	/*
	 * If the longest extent is long enough to satisfy all the
	 * reservations and AGFL rules in place, we can return this extent.
	 */
	if (pag->pagf_longest > delta)
		return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
				pag->pagf_longest - delta);

	/* Otherwise, let the caller try for 1 block if there's space. */
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
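
/*
 * Worked example (illustrative, not part of the original source):
 * pagf_longest = 100, pagf_freeblks = 150, need = 8 with pagf_flcount = 3
 * gives delta = 5; with reserved = 40 the non-longest free space
 * (150 - 100 = 50) still covers the reservation, so the result is
 * min(m_ag_max_usable, 100 - 5) = 95 blocks (capped by m_ag_max_usable).
 */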
/*
 * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
 * return the largest possible minimum length.
 */
unsigned int
xfs_alloc_min_freelist(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	/* AG btrees have at least 1 level. */
	static const uint8_t	fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
	const uint8_t		*levels = pag ? pag->pagf_levels : fake_levels;
	unsigned int		min_free;

	ASSERT(mp->m_alloc_maxlevels > 0);

	/*
	 * For a btree shorter than the maximum height, the worst case is that
	 * every level gets split and a new level is added, then while inserting
	 * another entry to refill the AGFL, every level under the old root gets
	 * split again. This is:
	 *
	 *   (full height split reservation) + (AGFL refill split height)
	 * = (current height + 1) + (current height - 1)
	 * = (new height) + (new height - 2)
	 * = 2 * new height - 2
	 *
	 * For a btree of maximum height, the worst case is that every level
	 * under the root gets split, then while inserting another entry to
	 * refill the AGFL, every level under the root gets split again. This is
	 * also:
	 *
	 *   2 * (current height - 1)
	 * = 2 * (new height - 1)
	 * = 2 * new height - 2
	 */

	/* space needed by-bno freespace btree */
	min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
				       mp->m_alloc_maxlevels) * 2 - 2;
	/* space needed by-size freespace btree */
	min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
					mp->m_alloc_maxlevels) * 2 - 2;
	/* space needed reverse mapping used space btree */
	if (xfs_has_rmapbt(mp))
		min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
						mp->m_rmap_maxlevels) * 2 - 2;

	return min_free;
}
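
/*
 * Worked example (illustrative, not part of the original source): with
 * both free space btrees at height 2 and m_alloc_maxlevels = 5, each
 * btree needs min(2 + 1, 5) * 2 - 2 = 4 blocks, so min_free = 8; an rmap
 * btree at height 2 adds another 4 for a total of 12.
 */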
2312 * Check if the operation we are fixing up the freelist for should go ahead or
2313 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2314 * is dependent on whether the size and shape of free space available will
2315 * permit the requested allocation to take place.
2318 xfs_alloc_space_available(
2319 struct xfs_alloc_arg *args,
2320 xfs_extlen_t min_free,
2323 struct xfs_perag *pag = args->pag;
2324 xfs_extlen_t alloc_len, longest;
2325 xfs_extlen_t reservation; /* blocks that are still reserved */
2327 xfs_extlen_t agflcount;
2329 if (flags & XFS_ALLOC_FLAG_FREEING)
2332 reservation = xfs_ag_resv_needed(pag, args->resv);
2334 /* do we have enough contiguous free space for the allocation? */
2335 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2336 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2337 if (longest < alloc_len)
2341 * Do we have enough free space remaining for the allocation? Don't
2342 * account extra agfl blocks because we are about to defer free them,
2343 * making them unavailable until the current transaction commits.
2345 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2346 available = (int)(pag->pagf_freeblks + agflcount -
2347 reservation - min_free - args->minleft);
2348 if (available < (int)max(args->total, alloc_len))
2352 * Clamp maxlen to the amount of free space available for the actual
2353 * extent allocation.
2355 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2356 args->maxlen = available;
2357 ASSERT(args->maxlen > 0);
2358 ASSERT(args->maxlen >= args->minlen);
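	/*
	 * Worked example (hypothetical numbers): pagf_freeblks = 50,
	 * pagf_flcount = 3, min_free = 6, reservation = 10, minleft = 0.
	 * Then agflcount = min(3, 6) = 3 and available = 50 + 3 - 10 - 6
	 * - 0 = 37, so the allocation may proceed only if
	 * max(total, alloc_len) <= 37, with maxlen clamped to 37 if it
	 * was larger.
	 */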
2365 xfs_free_agfl_block(
2366 struct xfs_trans *tp,
2367 xfs_agnumber_t agno,
2368 xfs_agblock_t agbno,
2369 struct xfs_buf *agbp,
2370 struct xfs_owner_info *oinfo)
2375 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2380 error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2381 XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2382 tp->t_mountp->m_bsize, 0, &bp);
2385 xfs_trans_binval(tp, bp);
2391 * Check the agfl fields of the agf for inconsistency or corruption.
2393 * The original purpose was to detect an agfl header padding mismatch between
2394 * current and early v5 kernels. This problem manifests as a 1-slot size
2395 * difference between the on-disk flcount and the active [first, last] range of a wrapped AGFL.
2398 * However, we need to use these same checks to catch agfl count corruptions
2399 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2400 * way, we need to reset the agfl and warn the user.
2402 * Return true if a reset is required before the agfl can be used, false otherwise.
2406 xfs_agfl_needs_reset(
2407 struct xfs_mount *mp,
2408 struct xfs_agf *agf)
2410 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2411 uint32_t l = be32_to_cpu(agf->agf_fllast);
2412 uint32_t c = be32_to_cpu(agf->agf_flcount);
2413 int agfl_size = xfs_agfl_size(mp);
2417 * The agf read verifier catches severe corruption of these fields.
2418 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2419 * the verifier allows it.
2421 if (f >= agfl_size || l >= agfl_size)
2427 * Check consistency between the on-disk count and the active range. An
2428 * agfl padding mismatch manifests as an inconsistent flcount.
2433 active = agfl_size - f + l + 1;
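	/*
	 * e.g. (hypothetical values) with agfl_size = 119, flfirst = 117
	 * and fllast = 1, the active range wraps across slots 117, 118,
	 * 0 and 1, so active = 119 - 117 + 1 + 1 = 4; any other on-disk
	 * flcount marks the AGFL as needing a reset.
	 */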
2441 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2442 * agfl content cannot be trusted. Warn the user that a repair is required to
2443 * recover leaked blocks.
2445 * The purpose of this mechanism is to handle filesystems affected by the agfl
2446 * header padding mismatch problem. A reset keeps the filesystem online with a
2447 * relatively minor free space accounting inconsistency rather than suffer the
2448 * inevitable crash from use of an invalid agfl block.
2452 struct xfs_trans *tp,
2453 struct xfs_buf *agbp,
2454 struct xfs_perag *pag)
2456 struct xfs_mount *mp = tp->t_mountp;
2457 struct xfs_agf *agf = agbp->b_addr;
2459 ASSERT(xfs_perag_agfl_needs_reset(pag));
2460 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2463 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2464 "Please unmount and run xfs_repair.",
2465 pag->pag_agno, pag->pagf_flcount);
2467 agf->agf_flfirst = 0;
2468 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2469 agf->agf_flcount = 0;
2470 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2473 pag->pagf_flcount = 0;
2474 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2478 * Defer an AGFL block free. This is effectively equivalent to
2479 * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2481 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2482 * allocation operations in a transaction. AGFL frees are prone to this problem
2483 * because, for one, they are always freed one at a time. Further, an immediate
2484 * AGFL block free can cause a btree join and require another block free before
2485 * the real allocation can proceed. Deferring the free disconnects freeing up
2486 * the AGFL slot from freeing the block.
2489 xfs_defer_agfl_block(
2490 struct xfs_trans *tp,
2491 xfs_agnumber_t agno,
2492 xfs_agblock_t agbno,
2493 struct xfs_owner_info *oinfo)
2495 struct xfs_mount *mp = tp->t_mountp;
2496 struct xfs_extent_free_item *xefi;
2497 xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
2499 ASSERT(xfs_extfree_item_cache != NULL);
2500 ASSERT(oinfo != NULL);
2502 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
2503 return -EFSCORRUPTED;
2505 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2506 GFP_KERNEL | __GFP_NOFAIL);
2507 xefi->xefi_startblock = fsbno;
2508 xefi->xefi_blockcount = 1;
2509 xefi->xefi_owner = oinfo->oi_owner;
2510 xefi->xefi_agresv = XFS_AG_RESV_AGFL;
2512 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2514 xfs_extent_free_get_group(mp, xefi);
2515 xfs_defer_add(tp, &xefi->xefi_list, &xfs_agfl_free_defer_type);
2520 * Add the extent to the list of extents to be freed at transaction end.
2521 * The list is maintained sorted (by block number).
2524 xfs_defer_extent_free(
2525 struct xfs_trans *tp,
2528 const struct xfs_owner_info *oinfo,
2529 enum xfs_ag_resv_type type,
2531 struct xfs_defer_pending **dfpp)
2533 struct xfs_extent_free_item *xefi;
2534 struct xfs_mount *mp = tp->t_mountp;
2536 xfs_agnumber_t agno;
2537 xfs_agblock_t agbno;
2539 ASSERT(bno != NULLFSBLOCK);
2541 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2542 ASSERT(!isnullstartblock(bno));
2543 agno = XFS_FSB_TO_AGNO(mp, bno);
2544 agbno = XFS_FSB_TO_AGBNO(mp, bno);
2545 ASSERT(agno < mp->m_sb.sb_agcount);
2546 ASSERT(agbno < mp->m_sb.sb_agblocks);
2547 ASSERT(len < mp->m_sb.sb_agblocks);
2548 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2550 ASSERT(xfs_extfree_item_cache != NULL);
2551 ASSERT(type != XFS_AG_RESV_AGFL);
2553 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2554 return -EFSCORRUPTED;
2556 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2557 GFP_KERNEL | __GFP_NOFAIL);
2558 xefi->xefi_startblock = bno;
2559 xefi->xefi_blockcount = (xfs_extlen_t)len;
2560 xefi->xefi_agresv = type;
2562 xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2564 ASSERT(oinfo->oi_offset == 0);
2566 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2567 xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2568 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2569 xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2570 xefi->xefi_owner = oinfo->oi_owner;
2572 xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2574 trace_xfs_bmap_free_defer(mp,
2575 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2576 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2578 xfs_extent_free_get_group(mp, xefi);
2579 *dfpp = xfs_defer_add(tp, &xefi->xefi_list, &xfs_extent_free_defer_type);
2584 xfs_free_extent_later(
2585 struct xfs_trans *tp,
2588 const struct xfs_owner_info *oinfo,
2589 enum xfs_ag_resv_type type,
2592 struct xfs_defer_pending *dontcare = NULL;
2594 return xfs_defer_extent_free(tp, bno, len, oinfo, type, skip_discard,
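/*
 * A minimal usage sketch (hypothetical values, error handling elided),
 * assuming the elided parameters above are the usual fsbno/len and
 * skip_discard flag:
 *
 *	error = xfs_free_extent_later(tp, fsbno, 16,
 *			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE, false);
 */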
2599 * Set up automatic freeing of unwritten space in the filesystem.
2601 * This function attaches a paused deferred extent free item to the
2602 * transaction. Pausing means that the EFI will be logged in the next
2603 * transaction commit, but the pending EFI will not be finished until the
2604 * pending item is unpaused.
2606 * If the system goes down after the EFI has been persisted to the log but
2607 * before the pending item is unpaused, log recovery will find the EFI, fail to
2608 * find the EFD, and free the space.
2610 * If the pending item is unpaused, the next transaction commit will log an EFD
2611 * without freeing the space.
2613 * Caller must ensure that the tp, fsbno, len, oinfo, and resv fields of the
2614 * @args structure are set to the relevant values.
2617 xfs_alloc_schedule_autoreap(
2618 const struct xfs_alloc_arg *args,
2620 struct xfs_alloc_autoreap *aarp)
2624 error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2625 &args->oinfo, args->resv, skip_discard, &aarp->dfp);
2629 xfs_defer_item_pause(args->tp, aarp->dfp);
2634 * Cancel automatic freeing of unwritten space in the filesystem.
2636 * Earlier, we created a paused deferred extent free item and attached it to
2637 * this transaction so that we could automatically roll back a new space
2638 * allocation if the system went down. Now we want to cancel the paused work
2639 * item by marking the EFI stale so we don't actually free the space, unpausing
2640 * the pending item and logging an EFD.
2642 * The caller generally should have already mapped the space into the ondisk
2643 * filesystem. If the reserved space was partially used, the caller must call
2644 * xfs_free_extent_later to create a new EFI to free the unused space.
2647 xfs_alloc_cancel_autoreap(
2648 struct xfs_trans *tp,
2649 struct xfs_alloc_autoreap *aarp)
2651 struct xfs_defer_pending *dfp = aarp->dfp;
2652 struct xfs_extent_free_item *xefi;
2657 list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2658 xefi->xefi_flags |= XFS_EFI_CANCELLED;
2660 xfs_defer_item_unpause(tp, dfp);
2664 * Commit automatic freeing of unwritten space in the filesystem.
2666 * This unpauses an earlier _schedule_autoreap and commits to freeing the
2667 * allocated space. Call this if none of the reserved space was used.
2670 xfs_alloc_commit_autoreap(
2671 struct xfs_trans *tp,
2672 struct xfs_alloc_autoreap *aarp)
2675 xfs_defer_item_unpause(tp, aarp->dfp);
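/*
 * A minimal lifecycle sketch for the three autoreap helpers above
 * (hypothetical caller, error handling elided; assumes the elided middle
 * parameter of _schedule_autoreap is the skip_discard flag):
 *
 *	struct xfs_alloc_autoreap	aarp;
 *
 *	error = xfs_alloc_schedule_autoreap(&args, false, &aarp);
 *	...
 *	if (space_was_mapped)
 *		xfs_alloc_cancel_autoreap(tp, &aarp);
 *	else
 *		xfs_alloc_commit_autoreap(tp, &aarp);
 */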
2680 * Check if an AGF has a free extent record whose length is equal to args->minlen.
2684 xfs_exact_minlen_extent_available(
2685 struct xfs_alloc_arg *args,
2686 struct xfs_buf *agbp,
2689 struct xfs_btree_cur *cnt_cur;
2694 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2695 args->pag, XFS_BTNUM_CNT);
2696 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2701 error = -EFSCORRUPTED;
2705 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2709 if (*stat == 1 && flen != args->minlen)
2713 xfs_btree_del_cursor(cnt_cur, error);
2720 * Decide whether to use this allocation group for this allocation.
2721 * If so, fix up the btree freelist's size.
2724 xfs_alloc_fix_freelist(
2725 struct xfs_alloc_arg *args, /* allocation argument structure */
2726 uint32_t alloc_flags)
2728 struct xfs_mount *mp = args->mp;
2729 struct xfs_perag *pag = args->pag;
2730 struct xfs_trans *tp = args->tp;
2731 struct xfs_buf *agbp = NULL;
2732 struct xfs_buf *agflbp = NULL;
2733 struct xfs_alloc_arg targs; /* local allocation arguments */
2734 xfs_agblock_t bno; /* freelist block */
2735 xfs_extlen_t need; /* total blocks needed in freelist */
2738 /* deferred ops (AGFL block frees) require permanent transactions */
2739 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2741 if (!xfs_perag_initialised_agf(pag)) {
2742 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2744 /* Couldn't lock the AGF so skip this AG. */
2745 if (error == -EAGAIN)
2752 * If this is a metadata preferred pag and we are user data then try
2753 * somewhere else if we are not being asked to try harder at this point.
2756 if (xfs_perag_prefers_metadata(pag) &&
2757 (args->datatype & XFS_ALLOC_USERDATA) &&
2758 (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2759 ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2760 goto out_agbp_relse;
2763 need = xfs_alloc_min_freelist(mp, pag);
2764 if (!xfs_alloc_space_available(args, need, alloc_flags |
2765 XFS_ALLOC_FLAG_CHECK))
2766 goto out_agbp_relse;
2769 * Get the a.g. freespace buffer.
2770 * Can fail if we're not blocking on locks, and it's held.
2773 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2775 /* Couldn't lock the AGF so skip this AG. */
2776 if (error == -EAGAIN)
2782 /* reset a padding mismatched agfl before final free space check */
2783 if (xfs_perag_agfl_needs_reset(pag))
2784 xfs_agfl_reset(tp, agbp, pag);
2786 /* If there isn't enough total space or a large-enough single extent, reject it. */
2787 need = xfs_alloc_min_freelist(mp, pag);
2788 if (!xfs_alloc_space_available(args, need, alloc_flags))
2789 goto out_agbp_relse;
2792 if (args->alloc_minlen_only) {
2795 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2797 goto out_agbp_relse;
2801 * Make the freelist shorter if it's too long.
2803 * Note that from this point onwards, we will always release the agf and
2804 * agfl buffers on error. This handles the case where we error out and
2805 * the buffers are clean or may not have been joined to the transaction
2806 * and hence need to be released manually. If they have been joined to
2807 * the transaction, then xfs_trans_brelse() will handle them
2808 * appropriately based on the recursion count and dirty state of the buffer.
2811 * XXX (dgc): When we have lots of free space, does this buy us
2812 * anything other than extra overhead when we need to put more blocks
2813 * back on the free list? Maybe we should only do this when space is
2814 * getting low or the AGFL is more than half full?
2816 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2817 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2818 * updating the rmapbt. Both flags are used in xfs_repair while we're
2819 * rebuilding the rmapbt, and neither is used by the kernel. They're
2820 * both required to ensure that rmaps are correctly recorded for the
2821 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2822 * repair/rmap.c in xfsprogs for details.
2824 memset(&targs, 0, sizeof(targs));
2825 /* struct copy below */
2826 if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2827 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2829 targs.oinfo = XFS_RMAP_OINFO_AG;
2830 while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2831 pag->pagf_flcount > need) {
2832 error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2834 goto out_agbp_relse;
2836 /* defer agfl frees */
2837 error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2839 goto out_agbp_relse;
2845 targs.agno = args->agno;
2846 targs.alignment = targs.minlen = targs.prod = 1;
2848 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2850 goto out_agbp_relse;
2852 /* Make the freelist longer if it's too short. */
2853 while (pag->pagf_flcount < need) {
2855 targs.maxlen = need - pag->pagf_flcount;
2856 targs.resv = XFS_AG_RESV_AGFL;
2858 /* Allocate as many blocks as possible at once. */
2859 error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2861 goto out_agflbp_relse;
2864 * Stop if we run out. Won't happen if callers are obeying
2865 * the restrictions correctly. Can happen for free calls
2866 * on a completely full ag.
2868 if (targs.agbno == NULLAGBLOCK) {
2869 if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2871 goto out_agflbp_relse;
2874 if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2875 error = xfs_rmap_alloc(tp, agbp, pag,
2876 targs.agbno, targs.len, &targs.oinfo);
2878 goto out_agflbp_relse;
2880 error = xfs_alloc_update_counters(tp, agbp,
2881 -((long)(targs.len)));
2883 goto out_agflbp_relse;
2886 * Put each allocated block on the list.
2888 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2889 error = xfs_alloc_put_freelist(pag, tp, agbp,
2892 goto out_agflbp_relse;
2895 xfs_trans_brelse(tp, agflbp);
2900 xfs_trans_brelse(tp, agflbp);
2903 xfs_trans_brelse(tp, agbp);
2910 * Get a block from the freelist.
2911 * Returns the block number via @bnop, or NULLAGBLOCK if the freelist is empty.
2914 xfs_alloc_get_freelist(
2915 struct xfs_perag *pag,
2916 struct xfs_trans *tp,
2917 struct xfs_buf *agbp,
2918 xfs_agblock_t *bnop,
2921 struct xfs_agf *agf = agbp->b_addr;
2922 struct xfs_buf *agflbp;
2927 struct xfs_mount *mp = tp->t_mountp;
2930 * Freelist is empty, give up.
2932 if (!agf->agf_flcount) {
2933 *bnop = NULLAGBLOCK;
2937 * Read the array of free blocks.
2939 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2945 * Get the block number and update the data structures.
2947 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2948 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2949 if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
2950 return -EFSCORRUPTED;
2952 be32_add_cpu(&agf->agf_flfirst, 1);
2953 xfs_trans_brelse(tp, agflbp);
2954 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2955 agf->agf_flfirst = 0;
2957 ASSERT(!xfs_perag_agfl_needs_reset(pag));
2958 be32_add_cpu(&agf->agf_flcount, -1);
2959 pag->pagf_flcount--;
2961 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2963 be32_add_cpu(&agf->agf_btreeblks, 1);
2964 pag->pagf_btreeblks++;
2965 logflags |= XFS_AGF_BTREEBLKS;
2968 xfs_alloc_log_agf(tp, agbp, logflags);
2975 * Log the given fields from the agf structure.
2979 struct xfs_trans *tp,
2983 int first; /* first byte offset */
2984 int last; /* last byte offset */
2985 static const short offsets[] = {
2986 offsetof(xfs_agf_t, agf_magicnum),
2987 offsetof(xfs_agf_t, agf_versionnum),
2988 offsetof(xfs_agf_t, agf_seqno),
2989 offsetof(xfs_agf_t, agf_length),
2990 offsetof(xfs_agf_t, agf_roots[0]),
2991 offsetof(xfs_agf_t, agf_levels[0]),
2992 offsetof(xfs_agf_t, agf_flfirst),
2993 offsetof(xfs_agf_t, agf_fllast),
2994 offsetof(xfs_agf_t, agf_flcount),
2995 offsetof(xfs_agf_t, agf_freeblks),
2996 offsetof(xfs_agf_t, agf_longest),
2997 offsetof(xfs_agf_t, agf_btreeblks),
2998 offsetof(xfs_agf_t, agf_uuid),
2999 offsetof(xfs_agf_t, agf_rmap_blocks),
3000 offsetof(xfs_agf_t, agf_refcount_blocks),
3001 offsetof(xfs_agf_t, agf_refcount_root),
3002 offsetof(xfs_agf_t, agf_refcount_level),
3003 /* needed so that we don't log the whole rest of the structure: */
3004 offsetof(xfs_agf_t, agf_spare64),
3008 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3010 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3012 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3013 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
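	/*
	 * e.g. (hypothetical caller) fields = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT
	 * makes xfs_btree_offsets() return the byte range running from
	 * agf_flfirst through the end of agf_flcount, so both dirty fields
	 * (and agf_fllast, which sits between them) are logged as one
	 * contiguous region.
	 */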
3017 * Put the block on the freelist for the allocation group.
3020 xfs_alloc_put_freelist(
3021 struct xfs_perag *pag,
3022 struct xfs_trans *tp,
3023 struct xfs_buf *agbp,
3024 struct xfs_buf *agflbp,
3028 struct xfs_mount *mp = tp->t_mountp;
3029 struct xfs_agf *agf = agbp->b_addr;
3037 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3042 be32_add_cpu(&agf->agf_fllast, 1);
3043 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3044 agf->agf_fllast = 0;
3046 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3047 be32_add_cpu(&agf->agf_flcount, 1);
3048 pag->pagf_flcount++;
3050 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3052 be32_add_cpu(&agf->agf_btreeblks, -1);
3053 pag->pagf_btreeblks--;
3054 logflags |= XFS_AGF_BTREEBLKS;
3057 xfs_alloc_log_agf(tp, agbp, logflags);
3059 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3061 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3062 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3063 *blockp = cpu_to_be32(bno);
3064 startoff = (char *)blockp - (char *)agflbp->b_addr;
3066 xfs_alloc_log_agf(tp, agbp, logflags);
3068 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3069 xfs_trans_log_buf(tp, agflbp, startoff,
3070 startoff + sizeof(xfs_agblock_t) - 1);
3075 * Check that this AGF/AGI header's sequence number and length match the AG
3076 * number and size in fsblocks.
3079 xfs_validate_ag_length(
3084 struct xfs_mount *mp = bp->b_mount;
3086 * During growfs operations, the perag is not fully initialised,
3087 * so we can't use it for any useful checking. growfs ensures we can't
3088 * use it by using uncached buffers that don't have the perag attached
3089 * so we can detect and avoid this problem.
3091 if (bp->b_pag && seqno != bp->b_pag->pag_agno)
3092 return __this_address;
3095 * Only the last AG in the filesystem is allowed to be shorter
3096 * than the AG size recorded in the superblock.
3098 if (length != mp->m_sb.sb_agblocks) {
3100 * During growfs, the new last AG can get here before we
3101 * have updated the superblock. Give it a pass on the seqno check.
3104 if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3105 return __this_address;
3106 if (length < XFS_MIN_AG_BLOCKS)
3107 return __this_address;
3108 if (length > mp->m_sb.sb_agblocks)
3109 return __this_address;
3116 * Verify the AGF is consistent.
3118 * We do not verify the AGFL indexes in the AGF are fully consistent here
3119 * because of issues with variable on-disk structure sizes. Instead, we check
3120 * the agfl indexes for consistency when we initialise the perag from the AGF
3121 * information after a read completes.
3123 * If the index is inconsistent, then we mark the perag as needing an AGFL
3124 * reset. The first AGFL update performed then resets the AGFL indexes and
3125 * refills the AGFL with known good free blocks, allowing the filesystem to
3126 * continue operating normally at the cost of a few leaked free space blocks.
3128 static xfs_failaddr_t
3132 struct xfs_mount *mp = bp->b_mount;
3133 struct xfs_agf *agf = bp->b_addr;
3135 uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
3136 uint32_t agf_length = be32_to_cpu(agf->agf_length);
3138 if (xfs_has_crc(mp)) {
3139 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3140 return __this_address;
3141 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3142 return __this_address;
3145 if (!xfs_verify_magic(bp, agf->agf_magicnum))
3146 return __this_address;
3148 if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3149 return __this_address;
3152 * Both agf_seqno and agf_length need to be validated before anything else
3153 * block-number-related in the AGF or AGFL can be checked.
3155 fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3159 if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3160 return __this_address;
3161 if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3162 return __this_address;
3163 if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3164 return __this_address;
3166 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3167 be32_to_cpu(agf->agf_freeblks) > agf_length)
3168 return __this_address;
3170 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
3171 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
3172 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
3173 mp->m_alloc_maxlevels ||
3174 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
3175 mp->m_alloc_maxlevels)
3176 return __this_address;
3178 if (xfs_has_lazysbcount(mp) &&
3179 be32_to_cpu(agf->agf_btreeblks) > agf_length)
3180 return __this_address;
3182 if (xfs_has_rmapbt(mp)) {
3183 if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3184 return __this_address;
3186 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
3187 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
3188 mp->m_rmap_maxlevels)
3189 return __this_address;
3192 if (xfs_has_reflink(mp)) {
3193 if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3194 return __this_address;
3196 if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3197 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3198 return __this_address;
3205 xfs_agf_read_verify(
3208 struct xfs_mount *mp = bp->b_mount;
3211 if (xfs_has_crc(mp) &&
3212 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3213 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3215 fa = xfs_agf_verify(bp);
3216 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3217 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3222 xfs_agf_write_verify(
3225 struct xfs_mount *mp = bp->b_mount;
3226 struct xfs_buf_log_item *bip = bp->b_log_item;
3227 struct xfs_agf *agf = bp->b_addr;
3230 fa = xfs_agf_verify(bp);
3232 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3236 if (!xfs_has_crc(mp))
3240 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3242 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3245 const struct xfs_buf_ops xfs_agf_buf_ops = {
3247 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3248 .verify_read = xfs_agf_read_verify,
3249 .verify_write = xfs_agf_write_verify,
3250 .verify_struct = xfs_agf_verify,
3254 * Read in the allocation group header (free/alloc section).
3258 struct xfs_perag *pag,
3259 struct xfs_trans *tp,
3261 struct xfs_buf **agfbpp)
3263 struct xfs_mount *mp = pag->pag_mount;
3266 trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3268 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3269 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3270 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3274 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3279 * Read in the allocation group header (free/alloc section) and initialise the
3280 * perag structure if necessary. If the caller provides @agfbpp, then return the
3281 * locked buffer to the caller, otherwise release it.
3285 struct xfs_perag *pag,
3286 struct xfs_trans *tp,
3288 struct xfs_buf **agfbpp)
3290 struct xfs_buf *agfbp;
3291 struct xfs_agf *agf;
3295 trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3297 /* We don't support trylock when freeing. */
3298 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3299 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3300 error = xfs_read_agf(pag, tp,
3301 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3306 agf = agfbp->b_addr;
3307 if (!xfs_perag_initialised_agf(pag)) {
3308 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3309 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3310 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3311 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3312 pag->pagf_levels[XFS_BTNUM_BNOi] =
3313 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3314 pag->pagf_levels[XFS_BTNUM_CNTi] =
3315 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3316 pag->pagf_levels[XFS_BTNUM_RMAPi] =
3317 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3318 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3319 if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3320 set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3322 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3325 * Update the in-core allocbt counter. Filter out the rmapbt
3326 * subset of the btreeblks counter because the rmapbt is managed
3327 * by perag reservation. Subtract one for the rmapbt root block
3328 * because the rmap counter includes it while the btreeblks
3329 * counter only tracks non-root blocks.
3331 allocbt_blks = pag->pagf_btreeblks;
3332 if (xfs_has_rmapbt(pag->pag_mount))
3333 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3334 if (allocbt_blks > 0)
3335 atomic64_add(allocbt_blks,
3336 &pag->pag_mount->m_allocbt_blks);
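		/*
		 * Worked example (hypothetical numbers): pagf_btreeblks = 10
		 * with agf_rmap_blocks = 4 gives allocbt_blks = 10 - (4 - 1)
		 * = 7 non-root bnobt/cntbt blocks added to m_allocbt_blks.
		 */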
3338 set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3341 else if (!xfs_is_shutdown(pag->pag_mount)) {
3342 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3343 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3344 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3345 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3346 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3347 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3348 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3349 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3355 xfs_trans_brelse(tp, agfbp);
3360 * Pre-process allocation arguments to set initial state that we don't require
3361 * callers to set up correctly, as well as bounds check the allocation args that are set.
3365 xfs_alloc_vextent_check_args(
3366 struct xfs_alloc_arg *args,
3367 xfs_fsblock_t target,
3368 xfs_agnumber_t *minimum_agno)
3370 struct xfs_mount *mp = args->mp;
3371 xfs_agblock_t agsize;
3373 args->fsbno = NULLFSBLOCK;
3376 if (args->tp->t_highest_agno != NULLAGNUMBER)
3377 *minimum_agno = args->tp->t_highest_agno;
3380 * Just fix this up, for the case where the last a.g. is shorter
3381 * (or there's only one a.g.) and the caller couldn't easily figure
3382 * that out (xfs_bmap_alloc).
3384 agsize = mp->m_sb.sb_agblocks;
3385 if (args->maxlen > agsize)
3386 args->maxlen = agsize;
3387 if (args->alignment == 0)
3388 args->alignment = 1;
3390 ASSERT(args->minlen > 0);
3391 ASSERT(args->maxlen > 0);
3392 ASSERT(args->alignment > 0);
3393 ASSERT(args->resv != XFS_AG_RESV_AGFL);
3395 ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3396 ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3397 ASSERT(args->minlen <= args->maxlen);
3398 ASSERT(args->minlen <= agsize);
3399 ASSERT(args->mod < args->prod);
3401 if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3402 XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3403 args->minlen > args->maxlen || args->minlen > agsize ||
3404 args->mod >= args->prod) {
3405 trace_xfs_alloc_vextent_badargs(args);
3409 if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3410 trace_xfs_alloc_vextent_skip_deadlock(args);
3418 * Prepare an AG for allocation. If the AG is not prepared to accept the
3419 * allocation, return failure.
3421 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3422 * modified to hold their own perag references.
3425 xfs_alloc_vextent_prepare_ag(
3426 struct xfs_alloc_arg *args,
3427 uint32_t alloc_flags)
3429 bool need_pag = !args->pag;
3433 args->pag = xfs_perag_get(args->mp, args->agno);
3436 error = xfs_alloc_fix_freelist(args, alloc_flags);
3438 trace_xfs_alloc_vextent_nofix(args);
3440 xfs_perag_put(args->pag);
3441 args->agbno = NULLAGBLOCK;
3445 /* cannot allocate in this AG at all */
3446 trace_xfs_alloc_vextent_noagbp(args);
3447 args->agbno = NULLAGBLOCK;
3450 args->wasfromfl = 0;
3455 * Post-process allocation results to account for the allocation if it succeeded
3456 * and set the allocated block number correctly for the caller.
3458 * XXX: we should really be returning ENOSPC for ENOSPC, not
3459 * hiding it behind a "successful" NULLFSBLOCK allocation.
3462 xfs_alloc_vextent_finish(
3463 struct xfs_alloc_arg *args,
3464 xfs_agnumber_t minimum_agno,
3468 struct xfs_mount *mp = args->mp;
3472 * We can end up here with a locked AGF. If we failed, the caller is
3473 * likely going to try to allocate again with different parameters, and
3474 * that can widen the AGs that are searched for free space. If we have
3475 * to do BMBT block allocation, we have to do a new allocation.
3477 * Hence leaving this function with the AGF locked opens up potential
3478 * ABBA AGF deadlocks because a future allocation attempt in this
3479 * transaction may attempt to lock a lower number AGF.
3481 * We can't release the AGF until the transaction is committed, so at
3482 * this point we must update the "first allocation" tracker to point at
3483 * this AG if the tracker is empty or points to a lower AG. This allows
3484 * the next allocation attempt to be modified appropriately to avoid deadlocks.
3488 (args->tp->t_highest_agno == NULLAGNUMBER ||
3489 args->agno > minimum_agno))
3490 args->tp->t_highest_agno = args->agno;
3493 * If the allocation failed with an error or we had an ENOSPC result,
3494 * preserve the returned error whilst also marking the allocation result
3495 * as "no extent allocated". This ensures that callers that fail to
3496 * capture the error will still treat it as a failed allocation.
3498 if (alloc_error || args->agbno == NULLAGBLOCK) {
3499 args->fsbno = NULLFSBLOCK;
3500 error = alloc_error;
3501 goto out_drop_perag;
3504 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3506 ASSERT(args->len >= args->minlen);
3507 ASSERT(args->len <= args->maxlen);
3508 ASSERT(args->agbno % args->alignment == 0);
3509 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3511 /* if not file data, insert new block into the reverse map btree */
3512 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3513 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3514 args->agbno, args->len, &args->oinfo);
3516 goto out_drop_perag;
3519 if (!args->wasfromfl) {
3520 error = xfs_alloc_update_counters(args->tp, args->agbp,
3521 -((long)(args->len)));
3523 goto out_drop_perag;
3525 ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3529 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3531 XFS_STATS_INC(mp, xs_allocx);
3532 XFS_STATS_ADD(mp, xs_allocb, args->len);
3534 trace_xfs_alloc_vextent_finish(args);
3537 if (drop_perag && args->pag) {
3538 xfs_perag_rele(args->pag);
3545 * Allocate within a single AG only. This uses a best-fit length algorithm so if
3546 * you need an exact sized allocation without locality constraints, this is the
3547 * fastest way to do it.
3549 * Caller is expected to hold a perag reference in args->pag.
3552 xfs_alloc_vextent_this_ag(
3553 struct xfs_alloc_arg *args,
3554 xfs_agnumber_t agno)
3556 struct xfs_mount *mp = args->mp;
3557 xfs_agnumber_t minimum_agno;
3558 uint32_t alloc_flags = 0;
3561 ASSERT(args->pag != NULL);
3562 ASSERT(args->pag->pag_agno == agno);
3567 trace_xfs_alloc_vextent_this_ag(args);
3569 error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3572 if (error == -ENOSPC)
3577 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3578 if (!error && args->agbp)
3579 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3581 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3585 * Iterate all AGs trying to allocate an extent starting from @start_ag.
3587 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3588 * allocation attempts in @start_agno have locality information. If we fail to
3589 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3590 * we attempt to allocate in, as there is no locality optimisation possible for
3591 * those allocations.
3593 * On return, args->pag may be left referenced if we finish before the "all
3594 * failed" return point. The allocation finish still needs the perag, and
3595 * so the caller will release it once they've finished the allocation.
3597 * When we wrap the AG iteration at the end of the filesystem, we have to be
3598 * careful not to wrap into AGs below ones we already have locked in the
3599 * transaction if we are doing a blocking iteration. This will result in an
3600 * out-of-order locking of AGFs and hence can cause deadlocks.
3603 xfs_alloc_vextent_iterate_ags(
3604 struct xfs_alloc_arg *args,
3605 xfs_agnumber_t minimum_agno,
3606 xfs_agnumber_t start_agno,
3607 xfs_agblock_t target_agbno,
3608 uint32_t alloc_flags)
3610 struct xfs_mount *mp = args->mp;
3611 xfs_agnumber_t restart_agno = minimum_agno;
3612 xfs_agnumber_t agno;
3615 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3618 for_each_perag_wrap_range(mp, start_agno, restart_agno,
3619 mp->m_sb.sb_agcount, agno, args->pag) {
3621 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3625 trace_xfs_alloc_vextent_loopfailed(args);
3630 * Allocation is supposed to succeed now, so break out of the
3631 * loop regardless of whether we succeed or not.
3633 if (args->agno == start_agno && target_agbno) {
3634 args->agbno = target_agbno;
3635 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3638 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3643 xfs_perag_rele(args->pag);
3651 * We didn't find an AG we can allocate from. If we were given
3652 * constraining flags by the caller, drop them and retry the allocation
3653 * without any constraints being set.
3655 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3656 alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3657 restart_agno = minimum_agno;
3661 ASSERT(args->pag == NULL);
3662 trace_xfs_alloc_vextent_allfailed(args);
3667 * Iterate the AGs from the start AG to the end of the filesystem, trying
3668 * to allocate blocks. It starts with a near allocation attempt in the initial
3669 * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3670 * back to zero if allowed by previous allocations in this transaction,
3671 * otherwise will wrap back to the start AG and run a second blocking pass to
3672 * the end of the filesystem.
3675 xfs_alloc_vextent_start_ag(
3676 struct xfs_alloc_arg *args,
3677 xfs_fsblock_t target)
3679 struct xfs_mount *mp = args->mp;
3680 xfs_agnumber_t minimum_agno;
3681 xfs_agnumber_t start_agno;
3682 xfs_agnumber_t rotorstep = xfs_rotorstep;
3683 bool bump_rotor = false;
3684 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3687 ASSERT(args->pag == NULL);
3689 args->agno = NULLAGNUMBER;
3690 args->agbno = NULLAGBLOCK;
3692 trace_xfs_alloc_vextent_start_ag(args);
3694 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3696 if (error == -ENOSPC)
3701 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3702 xfs_is_inode32(mp)) {
3703 target = XFS_AGB_TO_FSB(mp,
3704 ((mp->m_agfrotor / rotorstep) %
3705 mp->m_sb.sb_agcount), 0);
3709 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3710 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3711 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3714 if (args->agno == start_agno)
3715 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3716 (mp->m_sb.sb_agcount * rotorstep);
3718 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3719 (mp->m_sb.sb_agcount * rotorstep);
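	/*
	 * e.g. (hypothetical numbers) with sb_agcount = 4 and rotorstep = 3,
	 * an agfrotor of 7 maps to start AG (7 / 3) % 4 = 2, and the rotor
	 * advances modulo 4 * 3 = 12, so each AG is targeted for up to
	 * rotorstep consecutive initial-data allocations before moving on.
	 */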
3722 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3726 * Iterate from the agno indicated via @target through to the end of the
3727 * filesystem attempting blocking allocation. This does not wrap or try a second
3728 * pass, so it will not consider AGs lower than the one indicated by the target.
3731 xfs_alloc_vextent_first_ag(
3732 struct xfs_alloc_arg *args,
3733 xfs_fsblock_t target)
3735 struct xfs_mount *mp = args->mp;
3736 xfs_agnumber_t minimum_agno;
3737 xfs_agnumber_t start_agno;
3738 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3741 ASSERT(args->pag == NULL);
3743 args->agno = NULLAGNUMBER;
3744 args->agbno = NULLAGBLOCK;
3746 trace_xfs_alloc_vextent_first_ag(args);
3748 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3750 if (error == -ENOSPC)
3755 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3756 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3757 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3758 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3762 * Allocate at the exact block target or fail. Caller is expected to hold a
3763 * perag reference in args->pag.
3766 xfs_alloc_vextent_exact_bno(
3767 struct xfs_alloc_arg *args,
3768 xfs_fsblock_t target)
3770 struct xfs_mount *mp = args->mp;
3771 xfs_agnumber_t minimum_agno;
3774 ASSERT(args->pag != NULL);
3775 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3777 args->agno = XFS_FSB_TO_AGNO(mp, target);
3778 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3780 trace_xfs_alloc_vextent_exact_bno(args);
3782 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3784 if (error == -ENOSPC)
3789 error = xfs_alloc_vextent_prepare_ag(args, 0);
3790 if (!error && args->agbp)
3791 error = xfs_alloc_ag_vextent_exact(args);
3793 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3797 * Allocate an extent as close to the target as possible. If there are no
3798 * viable candidates in the AG, then fail the allocation.
3800 * Caller may or may not have a per-ag reference in args->pag.
3803 xfs_alloc_vextent_near_bno(
3804 struct xfs_alloc_arg *args,
3805 xfs_fsblock_t target)
3807 struct xfs_mount *mp = args->mp;
3808 xfs_agnumber_t minimum_agno;
3809 bool needs_perag = args->pag == NULL;
3810 uint32_t alloc_flags = 0;
3814 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3816 args->agno = XFS_FSB_TO_AGNO(mp, target);
3817 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3819 trace_xfs_alloc_vextent_near_bno(args);
3821 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3823 if (error == -ENOSPC)
3829 args->pag = xfs_perag_grab(mp, args->agno);
3831 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3832 if (!error && args->agbp)
3833 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3835 return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
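/*
 * A minimal usage sketch for the xfs_alloc_vextent_* entry points above
 * (hypothetical caller values, error handling elided):
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp	= tp,
 *		.mp	= mp,
 *		.oinfo	= XFS_RMAP_OINFO_ANY_OWNER,
 *		.minlen	= 1,
 *		.maxlen	= len,
 *		.prod	= 1,
 *		.resv	= XFS_AG_RESV_NONE,
 *	};
 *
 *	error = xfs_alloc_vextent_near_bno(&args, target);
 *	if (!error && args.fsbno != NULLFSBLOCK)
 *		... extent [args.fsbno, args.fsbno + args.len) was allocated
 */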
3838 /* Ensure that the freelist is at full capacity. */
3840 xfs_free_extent_fix_freelist(
3841 struct xfs_trans *tp,
3842 struct xfs_perag *pag,
3843 struct xfs_buf **agbp)
3845 struct xfs_alloc_arg args;
3848 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3850 args.mp = tp->t_mountp;
3851 args.agno = pag->pag_agno;
3855 * validate that the block number is legal - this enables us to detect
3856 * and handle silent filesystem corruption rather than crashing.
3858 if (args.agno >= args.mp->m_sb.sb_agcount)
3859 return -EFSCORRUPTED;
3861 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3871 * Just break up the extent address and hand off to xfs_free_ag_extent
3872 * after fixing up the freelist.
3876 struct xfs_trans *tp,
3877 struct xfs_perag *pag,
3878 xfs_agblock_t agbno,
3880 const struct xfs_owner_info *oinfo,
3881 enum xfs_ag_resv_type type,
3884 struct xfs_mount *mp = tp->t_mountp;
3885 struct xfs_buf *agbp;
3886 struct xfs_agf *agf;
3888 unsigned int busy_flags = 0;
3891 ASSERT(type != XFS_AG_RESV_AGFL);
3893 if (XFS_TEST_ERROR(false, mp,
3894 XFS_ERRTAG_FREE_EXTENT))
3897 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3902 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3903 error = -EFSCORRUPTED;
3907 /* validate the extent size is legal now that we have the agf locked */
3908 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3909 error = -EFSCORRUPTED;
3913 error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
3919 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3920 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3924 xfs_trans_brelse(tp, agbp);
3928 struct xfs_alloc_query_range_info {
3929 xfs_alloc_query_range_fn fn;
3933 /* Format btree record and pass to our callback. */
3935 xfs_alloc_query_range_helper(
3936 struct xfs_btree_cur *cur,
3937 const union xfs_btree_rec *rec,
3940 struct xfs_alloc_query_range_info *query = priv;
3941 struct xfs_alloc_rec_incore irec;
3944 xfs_alloc_btrec_to_irec(rec, &irec);
3945 fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
3947 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
3949 return query->fn(cur, &irec, query->priv);
3952 /* Find all free space within a given range of blocks. */
3954 xfs_alloc_query_range(
3955 struct xfs_btree_cur *cur,
3956 const struct xfs_alloc_rec_incore *low_rec,
3957 const struct xfs_alloc_rec_incore *high_rec,
3958 xfs_alloc_query_range_fn fn,
3961 union xfs_btree_irec low_brec = { .a = *low_rec };
3962 union xfs_btree_irec high_brec = { .a = *high_rec };
3963 struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
3965 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3966 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3967 xfs_alloc_query_range_helper, &query);
3970 /* Find all free space records. */
3972 xfs_alloc_query_all(
3973 struct xfs_btree_cur *cur,
3974 xfs_alloc_query_range_fn fn,
3977 struct xfs_alloc_query_range_info query;
3979 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3982 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
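/*
 * A minimal sketch (hypothetical helper, not part of this file) of an
 * xfs_alloc_query_range_fn callback that totals the free space visited by
 * xfs_alloc_query_range()/xfs_alloc_query_all():
 */
static int
xfs_alloc_sum_free_helper(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *irec,
	void				*priv)
{
	xfs_extlen_t			*total = priv;

	/* accumulate the length of each free space record we are shown */
	*total += irec->ar_blockcount;
	return 0;
}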
3986 * Scan part of the keyspace of the free space and tell us if the area has no
3987 * records, is fully mapped by records, or is partially filled.
3990 xfs_alloc_has_records(
3991 struct xfs_btree_cur *cur,
3994 enum xbtree_recpacking *outcome)
3996 union xfs_btree_irec low;
3997 union xfs_btree_irec high;
3999 memset(&low, 0, sizeof(low));
4000 low.a.ar_startblock = bno;
4001 memset(&high, 0xFF, sizeof(high));
4002 high.a.ar_startblock = bno + len - 1;
4004 return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
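/*
 * e.g. (hypothetical caller) passing the span of a suspected-free region to
 * xfs_alloc_has_records() yields XBTREE_RECPACKING_FULL when the whole range
 * is covered by free space records, XBTREE_RECPACKING_EMPTY when none of it
 * is, and XBTREE_RECPACKING_SPARSE otherwise.
 */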
4008 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
4009 * error code or XFS_ITER_*.
4013 struct xfs_mount *mp,
4014 struct xfs_agf *agf,
4015 struct xfs_buf *agflbp,
4016 xfs_agfl_walk_fn walk_fn,
4023 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
4024 i = be32_to_cpu(agf->agf_flfirst);
4026 /* Nothing to walk in an empty AGFL. */
4027 if (agf->agf_flcount == cpu_to_be32(0))
4030 /* Otherwise, walk from first to last, wrapping as needed. */
4032 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
4035 if (i == be32_to_cpu(agf->agf_fllast))
4037 if (++i == xfs_agfl_size(mp))
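/*
 * A minimal sketch (hypothetical helper, not part of this file) of an
 * xfs_agfl_walk_fn callback that counts the active AGFL blocks:
 */
static int
xfs_agfl_count_helper(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	unsigned int		*count = priv;

	/* invoked once per active AGFL slot by xfs_agfl_walk() */
	(*count)++;
	return 0;
}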
4045 xfs_extfree_intent_init_cache(void)
4047 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
4048 sizeof(struct xfs_extent_free_item),
4051 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
4055 xfs_extfree_intent_destroy_cache(void)
4057 kmem_cache_destroy(xfs_extfree_item_cache);
4058 xfs_extfree_item_cache = NULL;