// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_ialloc.h"
17 #include "xfs_ialloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_errortag.h"
20 #include "xfs_error.h"
22 #include "xfs_trans.h"
23 #include "xfs_buf_item.h"
24 #include "xfs_icreate_item.h"
25 #include "xfs_icache.h"
26 #include "xfs_trace.h"
/*
 * Lookup a record by ino in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	xfs_lookup_t		dir,	/* <=, >=, == */
	int			*stat)	/* success/failure */
{
41 cur->bc_rec.i.ir_startino = ino;
42 cur->bc_rec.i.ir_holemask = 0;
43 cur->bc_rec.i.ir_count = 0;
44 cur->bc_rec.i.ir_freecount = 0;
45 cur->bc_rec.i.ir_free = 0;
46 return xfs_btree_lookup(cur, dir, stat);
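	/*
	 * Note: only ir_startino participates in the lookup; the other incore
	 * record fields are zeroed above purely to leave the cursor's bc_rec
	 * in a known state, since the inobt/finobt key is just the chunk's
	 * starting inode number.
	 */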
50 * Update the record referred to by cur to the value given.
51 * This either works (return 0) or gets an EFSCORRUPTED error.
53 STATIC int /* error */
55 struct xfs_btree_cur *cur, /* btree cursor */
56 xfs_inobt_rec_incore_t *irec) /* btree record */
58 union xfs_btree_rec rec;
60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
61 if (xfs_has_sparseinodes(cur->bc_mp)) {
62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
63 rec.inobt.ir_u.sp.ir_count = irec->ir_count;
64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
	}
69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
70 return xfs_btree_update(cur, &rec);
73 /* Convert on-disk btree record to incore inobt record. */
75 xfs_inobt_btrec_to_irec(
77 const union xfs_btree_rec *rec,
78 struct xfs_inobt_rec_incore *irec)
80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
81 if (xfs_has_sparseinodes(mp)) {
82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
83 irec->ir_count = rec->inobt.ir_u.sp.ir_count;
84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
	} else {
		/*
		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
		 * values for full inode chunks.
		 */
		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
		irec->ir_count = XFS_INODES_PER_CHUNK;
		irec->ir_freecount =
				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
	}
95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
98 /* Compute the freecount of an incore inode record. */
100 xfs_inobt_rec_freecount(
101 const struct xfs_inobt_rec_incore *irec)
103 uint64_t realfree = irec->ir_free;
105 if (xfs_inobt_issparse(irec->ir_holemask))
106 realfree &= xfs_inobt_irec_to_allocmask(irec);
107 return hweight64(realfree);
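	/*
	 * Worked example: a sparse chunk whose upper half was never allocated
	 * has ir_holemask = 0xff00 (each holemask bit covers 4 inodes, set
	 * bits mark holes), so the allocmask keeps only the low 32 bits of
	 * ir_free and the popcount never counts inodes that do not physically
	 * exist.
	 */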
110 /* Simple checks for inode records. */
112 xfs_inobt_check_irec(
113 struct xfs_perag *pag,
114 const struct xfs_inobt_rec_incore *irec)
116 /* Record has to be properly aligned within the AG. */
117 if (!xfs_verify_agino(pag, irec->ir_startino))
118 return __this_address;
119 if (!xfs_verify_agino(pag,
120 irec->ir_startino + XFS_INODES_PER_CHUNK - 1))
121 return __this_address;
122 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
123 irec->ir_count > XFS_INODES_PER_CHUNK)
124 return __this_address;
125 if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
126 return __this_address;
128 if (xfs_inobt_rec_freecount(irec) != irec->ir_freecount)
129 return __this_address;
135 xfs_inobt_complain_bad_rec(
136 struct xfs_btree_cur *cur,
138 const struct xfs_inobt_rec_incore *irec)
140 struct xfs_mount *mp = cur->bc_mp;
143 "%s Inode BTree record corruption in AG %d detected at %pS!",
144 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free",
145 cur->bc_ag.pag->pag_agno, fa);
147 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
148 irec->ir_startino, irec->ir_count, irec->ir_freecount,
149 irec->ir_free, irec->ir_holemask);
150 return -EFSCORRUPTED;
154 * Get the data from the pointed-to record.
158 struct xfs_btree_cur *cur,
159 struct xfs_inobt_rec_incore *irec,
162 struct xfs_mount *mp = cur->bc_mp;
163 union xfs_btree_rec *rec;
167 error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || *stat == 0)
		return error;
171 xfs_inobt_btrec_to_irec(mp, rec, irec);
172 fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec);
	if (fa)
		return xfs_inobt_complain_bad_rec(cur, fa, irec);

	return 0;
}
180 * Insert a single inobt record. Cursor must already point to desired location.
183 xfs_inobt_insert_rec(
	struct xfs_btree_cur	*cur,
	uint16_t		holemask,
	uint8_t			count,
	int32_t			freecount,
	xfs_inofree_t		free,
	int			*stat)
{
191 cur->bc_rec.i.ir_holemask = holemask;
192 cur->bc_rec.i.ir_count = count;
193 cur->bc_rec.i.ir_freecount = freecount;
194 cur->bc_rec.i.ir_free = free;
195 return xfs_btree_insert(cur, stat);
199 * Insert records describing a newly allocated inode chunk into the inobt.
203 struct xfs_perag *pag,
204 struct xfs_trans *tp,
	struct xfs_buf		*agbp,
	xfs_agino_t		newino,
	xfs_agino_t		newlen,
	xfs_btnum_t		btnum)
{
210 struct xfs_btree_cur *cur;
215 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum);
217 for (thisino = newino;
218 thisino < newino + newlen;
219 thisino += XFS_INODES_PER_CHUNK) {
220 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
222 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
227 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
228 XFS_INODES_PER_CHUNK,
229 XFS_INODES_PER_CHUNK,
230 XFS_INOBT_ALL_FREE, &i);
232 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
238 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
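	/*
	 * Note that a single allocation can cover more than one 64-inode
	 * chunk record (e.g. when sb_inopblock exceeds XFS_INODES_PER_CHUNK
	 * on large block size filesystems), which is why the insert above
	 * loops in chunk-sized steps.
	 */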
244 * Verify that the number of free inodes in the AGI is correct.
248 xfs_check_agi_freecount(
249 struct xfs_btree_cur *cur)
251 if (cur->bc_nlevels == 1) {
252 xfs_inobt_rec_incore_t rec;
257 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
262 error = xfs_inobt_get_rec(cur, &rec, &i);
267 freecount += rec.ir_freecount;
268 error = xfs_btree_increment(cur, 0, &i);
274 if (!xfs_is_shutdown(cur->bc_mp))
275 ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
280 #define xfs_check_agi_freecount(cur) 0
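/*
 * The freecount verification walks the whole btree, so it is only built into
 * debug kernels; non-debug builds compile the check away via the stub above.
 */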
284 * Initialise a new set of inodes. When called without a transaction context
285 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
286 * than logging them (which in a transaction context puts them into the AIL
287 * for writeback rather than the xfsbufd queue).
290 xfs_ialloc_inode_init(
291 struct xfs_mount *mp,
292 struct xfs_trans *tp,
293 struct list_head *buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
300 struct xfs_buf *fbuf;
301 struct xfs_dinode *free;
310 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
314 nbufs = length / M_IGEO(mp)->blocks_per_cluster;
317 * Figure out what version number to use in the inodes we create. If
318 * the superblock version has caught up to the one that supports the new
319 * inode format, then use the new inode version. Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
323 * For v3 inodes, we also need to write the inode number into the inode,
324 * so calculate the first inode number of the chunk here as
325 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
326 * across multiple filesystem blocks (such as a cluster) and so cannot
327 * be used in the cluster buffer loop below.
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inodes we log the entire buffer rather than just
	 * the inode cores.
	 */
335 if (xfs_has_v3inodes(mp)) {
337 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
		 * log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
342 * need to log the physical changes to the inode buffers as log
343 * recovery will know what initialisation is actually needed.
344 * Hence we only need to log the buffers as "ordered" buffers so
345 * they track in the AIL as if they were physically logged.
348 xfs_icreate_log(tp, agno, agbno, icount,
349 mp->m_sb.sb_inodesize, length, gen);
353 for (j = 0; j < nbufs; j++) {
357 d = XFS_AGB_TO_DADDR(mp, agno, agbno +
358 (j * M_IGEO(mp)->blocks_per_cluster));
359 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
360 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
361 XBF_UNMAPPED, &fbuf);
365 /* Initialize the inode buffers and log them appropriately. */
366 fbuf->b_ops = &xfs_inode_buf_ops;
367 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
368 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
369 int ioffset = i << mp->m_sb.sb_inodelog;
371 free = xfs_make_iptr(mp, fbuf, i);
372 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
373 free->di_version = version;
374 free->di_gen = cpu_to_be32(gen);
375 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
378 free->di_ino = cpu_to_be64(ino);
380 uuid_copy(&free->di_uuid,
381 &mp->m_sb.sb_meta_uuid);
382 xfs_dinode_calc_crc(mp, free);
384 /* just log the inode core */
385 xfs_trans_log_buf(tp, fbuf, ioffset,
386 ioffset + XFS_DINODE_SIZE(mp) - 1);
392 * Mark the buffer as an inode allocation buffer so it
393 * sticks in AIL at the point of this allocation
			 * transaction. This ensures they are on disk before
395 * the tail of the log can be moved past this
396 * transaction (i.e. by preventing relogging from moving
397 * it forward in the log).
399 xfs_trans_inode_alloc_buf(tp, fbuf);
402 * Mark the buffer as ordered so that they are
403 * not physically logged in the transaction but
404 * still tracked in the AIL as part of the
405 * transaction and pin the log appropriately.
407 xfs_trans_ordered_buf(tp, fbuf);
410 fbuf->b_flags |= XBF_DONE;
411 xfs_buf_delwri_queue(fbuf, buffer_list);
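			/*
			 * With no transaction (e.g. log recovery) the caller
			 * is expected to submit the delwri list once every
			 * cluster buffer has been initialised.
			 */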
419 * Align startino and allocmask for a recently allocated sparse chunk such that
420 * they are fit for insertion (or merge) into the on-disk inode btrees.
424 * When enabled, sparse inode support increases the inode alignment from cluster
425 * size to inode chunk size. This means that the minimum range between two
426 * non-adjacent inode records in the inobt is large enough for a full inode
427 * record. This allows for cluster sized, cluster aligned block allocation
428 * without need to worry about whether the resulting inode record overlaps with
429 * another record in the tree. Without this basic rule, we would have to deal
430 * with the consequences of overlap by potentially undoing recent allocations in
431 * the inode allocation codepath.
433 * Because of this alignment rule (which is enforced on mount), there are two
434 * inobt possibilities for newly allocated sparse chunks. One is that the
435 * aligned inode record for the chunk covers a range of inodes not already
436 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
437 * other is that a record already exists at the aligned startino that considers
438 * the newly allocated range as sparse. In the latter case, record content is
439 * merged in hope that sparse inode chunks fill to full chunks over time.
442 xfs_align_sparse_ino(
443 struct xfs_mount *mp,
444 xfs_agino_t *startino,
451 agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
452 mod = agbno % mp->m_sb.sb_inoalignmt;
456 /* calculate the inode offset and align startino */
457 offset = XFS_AGB_TO_AGINO(mp, mod);
461 * Since startino has been aligned down, left shift allocmask such that
	 * it continues to represent the same physical inodes relative to the
	 * new startino.
	 */
465 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
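	/*
	 * Example: a sparse run that starts 32 inodes into its chunk-aligned
	 * record yields offset = 32, so allocmask is shifted left by
	 * 32 / 4 = 8 holemask bits and keeps describing the same physical
	 * inodes relative to the new, aligned startino.
	 */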
469 * Determine whether the source inode record can merge into the target. Both
470 * records must be sparse, the inode ranges must match and there must be no
471 * allocation overlap between the records.
474 __xfs_inobt_can_merge(
475 struct xfs_inobt_rec_incore *trec, /* tgt record */
476 struct xfs_inobt_rec_incore *srec) /* src record */
481 /* records must cover the same inode range */
482 if (trec->ir_startino != srec->ir_startino)
485 /* both records must be sparse */
486 if (!xfs_inobt_issparse(trec->ir_holemask) ||
487 !xfs_inobt_issparse(srec->ir_holemask))
490 /* both records must track some inodes */
491 if (!trec->ir_count || !srec->ir_count)
494 /* can't exceed capacity of a full record */
495 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
498 /* verify there is no allocation overlap */
499 talloc = xfs_inobt_irec_to_allocmask(trec);
	salloc = xfs_inobt_irec_to_allocmask(srec);
	if (talloc & salloc)
		return false;

	return true;
}
508 * Merge the source inode record into the target. The caller must call
509 * __xfs_inobt_can_merge() to ensure the merge is valid.
512 __xfs_inobt_rec_merge(
513 struct xfs_inobt_rec_incore *trec, /* target */
514 struct xfs_inobt_rec_incore *srec) /* src */
516 ASSERT(trec->ir_startino == srec->ir_startino);
518 /* combine the counts */
519 trec->ir_count += srec->ir_count;
520 trec->ir_freecount += srec->ir_freecount;
523 * Merge the holemask and free mask. For both fields, 0 bits refer to
524 * allocated inodes. We combine the allocated ranges with bitwise AND.
526 trec->ir_holemask &= srec->ir_holemask;
527 trec->ir_free &= srec->ir_free;
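	/*
	 * Example: merging a record with holemask 0xff00 (low half allocated)
	 * into one with holemask 0x00ff (high half allocated) yields holemask
	 * 0x0000, i.e. a fully allocated 64-inode chunk, with the counts
	 * summing to 64.
	 */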
531 * Insert a new sparse inode chunk into the associated inode btree. The inode
532 * record for the sparse chunk is pre-aligned to a startino that should match
533 * any pre-existing sparse inode record in the tree. This allows sparse chunks
536 * This function supports two modes of handling preexisting records depending on
537 * the merge flag. If merge is true, the provided record is merged with the
538 * existing record and updated in place. The merged record is returned in nrec.
539 * If merge is false, an existing record is replaced with the provided record.
540 * If no preexisting record exists, the provided record is always inserted.
542 * It is considered corruption if a merge is requested and not possible. Given
543 * the sparse inode alignment constraints, this should never happen.
546 xfs_inobt_insert_sprec(
547 struct xfs_perag *pag,
548 struct xfs_trans *tp,
549 struct xfs_buf *agbp,
551 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
552 bool merge) /* merge or replace */
554 struct xfs_mount *mp = pag->pag_mount;
555 struct xfs_btree_cur *cur;
558 struct xfs_inobt_rec_incore rec;
560 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum);
562 /* the new record is pre-aligned so we know where to look */
563 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
566 /* if nothing there, insert a new record and return */
568 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
569 nrec->ir_count, nrec->ir_freecount,
573 if (XFS_IS_CORRUPT(mp, i != 1)) {
574 error = -EFSCORRUPTED;
582 * A record exists at this startino. Merge or replace the record
583 * depending on what we've been asked to do.
586 error = xfs_inobt_get_rec(cur, &rec, &i);
589 if (XFS_IS_CORRUPT(mp, i != 1)) {
590 error = -EFSCORRUPTED;
593 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
594 error = -EFSCORRUPTED;
599 * This should never fail. If we have coexisting records that
600 * cannot merge, something is seriously wrong.
602 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
603 error = -EFSCORRUPTED;
607 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
608 rec.ir_holemask, nrec->ir_startino,
611 /* merge to nrec to output the updated record */
612 __xfs_inobt_rec_merge(nrec, &rec);
614 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
617 error = xfs_inobt_rec_check_count(mp, nrec);
622 error = xfs_inobt_update(cur, nrec);
627 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
630 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
635 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
636 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so
637 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum
638 * inode count threshold, or the usual negative error code for other errors.
642 struct xfs_perag *pag,
643 struct xfs_trans *tp,
644 struct xfs_buf *agbp)
647 struct xfs_alloc_arg args;
649 xfs_agino_t newino; /* new first inode's number */
650 xfs_agino_t newlen; /* new number of inodes */
651 int isaligned = 0; /* inode allocation at stripe */
653 /* init. to full chunk */
654 struct xfs_inobt_rec_incore rec;
655 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
656 uint16_t allocmask = (uint16_t) -1;
659 memset(&args, 0, sizeof(args));
661 args.mp = tp->t_mountp;
662 args.fsbno = NULLFSBLOCK;
663 args.oinfo = XFS_RMAP_OINFO_INODES;
667 /* randomly do sparse inode allocations */
668 if (xfs_has_sparseinodes(tp->t_mountp) &&
669 igeo->ialloc_min_blks < igeo->ialloc_blks)
670 do_sparse = get_random_u32_below(2);
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
677 newlen = igeo->ialloc_inos;
678 if (igeo->maxicount &&
679 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
682 args.minlen = args.maxlen = igeo->ialloc_blks;
684 * First try to allocate inodes contiguous with the last-allocated
685 * chunk of inodes. If the filesystem is striped, this will fill
686 * an entire stripe unit with inodes.
689 newino = be32_to_cpu(agi->agi_newino);
690 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
694 if (likely(newino != NULLAGINO &&
695 (args.agbno < be32_to_cpu(agi->agi_length)))) {
699 * We need to take into account alignment here to ensure that
700 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
705 * For an exact allocation, alignment must be 1,
706 * however we need to take cluster alignment into account when
707 * fixing up the freelist. Use the minalignslop field to
708 * indicate that extra blocks might be required for alignment,
709 * but not to use them in the actual exact allocation.
712 args.minalignslop = igeo->cluster_align - 1;
714 /* Allow space for the inode btree to split. */
715 args.minleft = igeo->inobt_maxlevels;
716 error = xfs_alloc_vextent_exact_bno(&args,
717 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
723 * This request might have dirtied the transaction if the AG can
724 * satisfy the request, but the exact block was not available.
725 * If the allocation did fail, subsequent requests will relax
726 * the exact agbno requirement and increase the alignment
727 * instead. It is critical that the total size of the request
728 * (len + alignment + slop) does not increase from this point
729 * on, so reset minalignslop to ensure it is not included in
730 * subsequent requests.
732 args.minalignslop = 0;
735 if (unlikely(args.fsbno == NULLFSBLOCK)) {
737 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
740 * If the cluster size is smaller than a filesystem block
741 * then we're doing I/O for inodes in filesystem block size
742 * pieces, so don't need alignment anyway.
745 if (igeo->ialloc_align) {
746 ASSERT(!xfs_has_noalign(args.mp));
747 args.alignment = args.mp->m_dalign;
750 args.alignment = igeo->cluster_align;
752 * Allocate a fixed-size extent of inodes.
756 * Allow space for the inode btree to split.
758 args.minleft = igeo->inobt_maxlevels;
759 error = xfs_alloc_vextent_near_bno(&args,
760 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
761 be32_to_cpu(agi->agi_root)));
767 * If stripe alignment is turned on, then try again with cluster
770 if (isaligned && args.fsbno == NULLFSBLOCK) {
771 args.alignment = igeo->cluster_align;
772 error = xfs_alloc_vextent_near_bno(&args,
773 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
774 be32_to_cpu(agi->agi_root)));
780 * Finally, try a sparse allocation if the filesystem supports it and
781 * the sparse allocation length is smaller than a full chunk.
783 if (xfs_has_sparseinodes(args.mp) &&
784 igeo->ialloc_min_blks < igeo->ialloc_blks &&
785 args.fsbno == NULLFSBLOCK) {
787 args.alignment = args.mp->m_sb.sb_spino_align;
790 args.minlen = igeo->ialloc_min_blks;
791 args.maxlen = args.minlen;
794 * The inode record will be aligned to full chunk size. We must
795 * prevent sparse allocation from AG boundaries that result in
796 * invalid inode records, such as records that start at agbno 0
797 * or extend beyond the AG.
799 * Set min agbno to the first aligned, non-zero agbno and max to
		 * the last aligned agbno that is at least one full chunk from
		 * the end of the AG.
		 */
803 args.min_agbno = args.mp->m_sb.sb_inoalignmt;
804 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
805 args.mp->m_sb.sb_inoalignmt) -
808 error = xfs_alloc_vextent_near_bno(&args,
809 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
810 be32_to_cpu(agi->agi_root)));
814 newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
815 ASSERT(newlen <= XFS_INODES_PER_CHUNK);
816 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
819 if (args.fsbno == NULLFSBLOCK)
822 ASSERT(args.len == args.minlen);
825 * Stamp and write the inode buffers.
827 * Seed the new inode cluster with a random generation number. This
828 * prevents short-term reuse of generation numbers if a chunk is
829 * freed and then immediately reallocated. We use random numbers
830 * rather than a linear progression to prevent the next generation
831 * number from being easily guessable.
833 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
834 args.agbno, args.len, get_random_u32());
839 * Convert the results.
841 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
843 if (xfs_inobt_issparse(~allocmask)) {
845 * We've allocated a sparse chunk. Align the startino and mask.
847 xfs_align_sparse_ino(args.mp, &newino, &allocmask);
849 rec.ir_startino = newino;
850 rec.ir_holemask = ~allocmask;
851 rec.ir_count = newlen;
852 rec.ir_freecount = newlen;
853 rec.ir_free = XFS_INOBT_ALL_FREE;
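		/*
		 * Example: a 32-inode sparse allocation that lands at the
		 * start of its aligned record has allocmask
		 * (1 << (32 / 4)) - 1 = 0x00ff, so the record is inserted
		 * with ir_holemask = 0xff00, ir_count = 32, and all 64 bits
		 * of ir_free set; the holemask masks off the missing inodes
		 * whenever the free count is recomputed.
		 */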
856 * Insert the sparse record into the inobt and allow for a merge
		 * if necessary. If a merge does occur, rec is updated to the
		 * merged record.
		 */
860 error = xfs_inobt_insert_sprec(pag, tp, agbp,
861 XFS_BTNUM_INO, &rec, true);
862 if (error == -EFSCORRUPTED) {
864 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
865 XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
867 rec.ir_holemask, rec.ir_count);
868 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
874 * We can't merge the part we've just allocated as for the inobt
875 * due to finobt semantics. The original record may or may not
		 * exist independent of whether physical inodes exist in this
		 * sparse chunk.
		 *
879 * We must update the finobt record based on the inobt record.
880 * rec contains the fully merged and up to date inobt record
881 * from the previous call. Set merge false to replace any
882 * existing record with this one.
884 if (xfs_has_finobt(args.mp)) {
885 error = xfs_inobt_insert_sprec(pag, tp, agbp,
886 XFS_BTNUM_FINO, &rec, false);
891 /* full chunk - insert new records to both btrees */
892 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen,
897 if (xfs_has_finobt(args.mp)) {
898 error = xfs_inobt_insert(pag, tp, agbp, newino,
899 newlen, XFS_BTNUM_FINO);
906 * Update AGI counts and newino.
908 be32_add_cpu(&agi->agi_count, newlen);
909 be32_add_cpu(&agi->agi_freecount, newlen);
910 pag->pagi_freecount += newlen;
911 pag->pagi_count += newlen;
912 agi->agi_newino = cpu_to_be32(newino);
915 * Log allocation group header fields
917 xfs_ialloc_log_agi(tp, agbp,
918 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
920 * Modify/log superblock values for inode count and inode free count.
922 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
923 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
928 * Try to retrieve the next record to the left/right from the current one.
932 struct xfs_btree_cur *cur,
933 xfs_inobt_rec_incore_t *rec,
941 error = xfs_btree_decrement(cur, 0, &i);
943 error = xfs_btree_increment(cur, 0, &i);
949 error = xfs_inobt_get_rec(cur, rec, &i);
952 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
953 return -EFSCORRUPTED;
961 struct xfs_btree_cur *cur,
963 xfs_inobt_rec_incore_t *rec,
969 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
974 error = xfs_inobt_get_rec(cur, rec, &i);
977 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
978 return -EFSCORRUPTED;
985 * Return the offset of the first free inode in the record. If the inode chunk
986 * is sparsely allocated, we convert the record holemask to inode granularity
987 * and mask off the unallocated regions from the inode free mask.
990 xfs_inobt_first_free_inode(
991 struct xfs_inobt_rec_incore *rec)
993 xfs_inofree_t realfree;
995 /* if there are no holes, return the first available offset */
996 if (!xfs_inobt_issparse(rec->ir_holemask))
997 return xfs_lowbit64(rec->ir_free);
999 realfree = xfs_inobt_irec_to_allocmask(rec);
1000 realfree &= rec->ir_free;
1002 return xfs_lowbit64(realfree);
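	/*
	 * Example: with ir_holemask = 0x00ff (the first 32 inodes are a hole)
	 * and ir_free = XFS_INOBT_ALL_FREE, realfree keeps only bits 32-63,
	 * so the first allocatable offset returned is 32 rather than 0.
	 */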
1006 * Allocate an inode using the inobt-only algorithm.
1009 xfs_dialloc_ag_inobt(
1010 struct xfs_perag *pag,
1011 struct xfs_trans *tp,
1012 struct xfs_buf *agbp,
1016 struct xfs_mount *mp = tp->t_mountp;
1017 struct xfs_agi *agi = agbp->b_addr;
1018 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1019 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1020 struct xfs_btree_cur *cur, *tcur;
1021 struct xfs_inobt_rec_incore rec, trec;
1026 int searchdistance = 10;
1028 ASSERT(xfs_perag_initialised_agi(pag));
1029 ASSERT(xfs_perag_allows_inodes(pag));
1030 ASSERT(pag->pagi_freecount > 0);
1033 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO);
1035 * If pagino is 0 (this is the root inode allocation) use newino.
1036 * This must work because we've just allocated some.
1039 pagino = be32_to_cpu(agi->agi_newino);
1041 error = xfs_check_agi_freecount(cur);
1046 * If in the same AG as the parent, try to get near the parent.
1048 if (pagno == pag->pag_agno) {
1049 int doneleft; /* done, to the left */
1050 int doneright; /* done, to the right */
1052 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1055 if (XFS_IS_CORRUPT(mp, i != 1)) {
1056 error = -EFSCORRUPTED;
1060 error = xfs_inobt_get_rec(cur, &rec, &j);
1063 if (XFS_IS_CORRUPT(mp, j != 1)) {
1064 error = -EFSCORRUPTED;
1068 if (rec.ir_freecount > 0) {
1070 * Found a free inode in the same chunk
1071 * as the parent, done.
1078 * In the same AG as parent, but parent's chunk is full.
1081 /* duplicate the cursor, search left & right simultaneously */
1082 error = xfs_btree_dup_cursor(cur, &tcur);
1087 * Skip to last blocks looked up if same parent inode.
1089 if (pagino != NULLAGINO &&
1090 pag->pagl_pagino == pagino &&
1091 pag->pagl_leftrec != NULLAGINO &&
1092 pag->pagl_rightrec != NULLAGINO) {
1093 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1098 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1103 /* search left with tcur, back up 1 record */
1104 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1108 /* search right with cur, go forward 1 record. */
1109 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1115 * Loop until we find an inode chunk with a free inode.
1117 while (--searchdistance > 0 && (!doneleft || !doneright)) {
1118 int useleft; /* using left inode chunk this time */
1120 /* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}
1129 /* free inodes to the left? */
1130 if (useleft && trec.ir_freecount) {
1131 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1134 pag->pagl_leftrec = trec.ir_startino;
1135 pag->pagl_rightrec = rec.ir_startino;
1136 pag->pagl_pagino = pagino;
1141 /* free inodes to the right? */
1142 if (!useleft && rec.ir_freecount) {
1143 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1145 pag->pagl_leftrec = trec.ir_startino;
1146 pag->pagl_rightrec = rec.ir_startino;
1147 pag->pagl_pagino = pagino;
1151 /* get next record to check */
1153 error = xfs_ialloc_next_rec(tcur, &trec,
1156 error = xfs_ialloc_next_rec(cur, &rec,
1163 if (searchdistance <= 0) {
1165 * Not in range - save last search
1166 * location and allocate a new inode
1168 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1169 pag->pagl_leftrec = trec.ir_startino;
1170 pag->pagl_rightrec = rec.ir_startino;
1171 pag->pagl_pagino = pagino;
			 * We've reached the end of the btree. Because we
			 * are only searching a small chunk of the btree in
			 * each search, there are obviously free inodes
			 * closer to the parent inode than we are now.
			 * Restart the search again.
1181 pag->pagl_pagino = NULLAGINO;
1182 pag->pagl_leftrec = NULLAGINO;
1183 pag->pagl_rightrec = NULLAGINO;
1184 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1185 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1191 * In a different AG from the parent.
1192 * See if the most recently allocated block has any free.
1194 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1195 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1201 error = xfs_inobt_get_rec(cur, &rec, &j);
1205 if (j == 1 && rec.ir_freecount > 0) {
1207 * The last chunk allocated in the group
1208 * still has a free inode.
1216 * None left in the last group, search the whole AG
1218 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1221 if (XFS_IS_CORRUPT(mp, i != 1)) {
1222 error = -EFSCORRUPTED;
1227 error = xfs_inobt_get_rec(cur, &rec, &i);
1230 if (XFS_IS_CORRUPT(mp, i != 1)) {
1231 error = -EFSCORRUPTED;
1234 if (rec.ir_freecount > 0)
1236 error = xfs_btree_increment(cur, 0, &i);
1239 if (XFS_IS_CORRUPT(mp, i != 1)) {
1240 error = -EFSCORRUPTED;
1246 offset = xfs_inobt_first_free_inode(&rec);
1247 ASSERT(offset >= 0);
1248 ASSERT(offset < XFS_INODES_PER_CHUNK);
1249 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1250 XFS_INODES_PER_CHUNK) == 0);
1251 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1252 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1254 error = xfs_inobt_update(cur, &rec);
1257 be32_add_cpu(&agi->agi_freecount, -1);
1258 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1259 pag->pagi_freecount--;
1261 error = xfs_check_agi_freecount(cur);
1265 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1266 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
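	/*
	 * At this point the chosen inode has been marked allocated in the
	 * inobt record, and the AGI, per-AG and superblock free inode counts
	 * have all been decremented by one to match.
	 */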
1270 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1272 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1277 * Use the free inode btree to allocate an inode based on distance from the
1278 * parent. Note that the provided cursor may be deleted and replaced.
1281 xfs_dialloc_ag_finobt_near(
1283 struct xfs_btree_cur **ocur,
1284 struct xfs_inobt_rec_incore *rec)
1286 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
1287 struct xfs_btree_cur *rcur; /* right search cursor */
1288 struct xfs_inobt_rec_incore rrec;
1292 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1297 error = xfs_inobt_get_rec(lcur, rec, &i);
1300 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
1301 return -EFSCORRUPTED;
1304 * See if we've landed in the parent inode record. The finobt
1305 * only tracks chunks with at least one free inode, so record
1306 * existence is enough.
1308 if (pagino >= rec->ir_startino &&
1309 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1313 error = xfs_btree_dup_cursor(lcur, &rcur);
1317 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1321 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1324 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
1325 error = -EFSCORRUPTED;
1330 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
1331 error = -EFSCORRUPTED;
1334 if (i == 1 && j == 1) {
1336 * Both the left and right records are valid. Choose the closer
1337 * inode chunk to the target.
1339 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1340 (rrec.ir_startino - pagino)) {
1342 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1345 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1347 } else if (j == 1) {
1348 /* only the right record is valid */
1350 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1352 } else if (i == 1) {
1353 /* only the left record is valid */
1354 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1360 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1365 * Use the free inode btree to find a free inode based on a newino hint. If
1366 * the hint is NULL, find the first free inode in the AG.
1369 xfs_dialloc_ag_finobt_newino(
1370 struct xfs_agi *agi,
1371 struct xfs_btree_cur *cur,
1372 struct xfs_inobt_rec_incore *rec)
1377 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1378 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1383 error = xfs_inobt_get_rec(cur, rec, &i);
1386 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1387 return -EFSCORRUPTED;
1393 * Find the first inode available in the AG.
1395 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1398 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1399 return -EFSCORRUPTED;
1401 error = xfs_inobt_get_rec(cur, rec, &i);
1404 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1405 return -EFSCORRUPTED;
1411 * Update the inobt based on a modification made to the finobt. Also ensure that
1412 * the records from both trees are equivalent post-modification.
1415 xfs_dialloc_ag_update_inobt(
1416 struct xfs_btree_cur *cur, /* inobt cursor */
1417 struct xfs_inobt_rec_incore *frec, /* finobt record */
1418 int offset) /* inode offset */
1420 struct xfs_inobt_rec_incore rec;
1424 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1427 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1428 return -EFSCORRUPTED;
1430 error = xfs_inobt_get_rec(cur, &rec, &i);
1433 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1434 return -EFSCORRUPTED;
1435 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1436 XFS_INODES_PER_CHUNK) == 0);
1438 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1441 if (XFS_IS_CORRUPT(cur->bc_mp,
1442 rec.ir_free != frec->ir_free ||
1443 rec.ir_freecount != frec->ir_freecount))
1444 return -EFSCORRUPTED;
1446 return xfs_inobt_update(cur, &rec);
1450 * Allocate an inode using the free inode btree, if available. Otherwise, fall
1451 * back to the inobt search algorithm.
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
1458 struct xfs_perag *pag,
1459 struct xfs_trans *tp,
1460 struct xfs_buf *agbp,
1464 struct xfs_mount *mp = tp->t_mountp;
1465 struct xfs_agi *agi = agbp->b_addr;
1466 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1467 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1468 struct xfs_btree_cur *cur; /* finobt cursor */
1469 struct xfs_btree_cur *icur; /* inobt cursor */
1470 struct xfs_inobt_rec_incore rec;
1476 if (!xfs_has_finobt(mp))
1477 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop);
1480 * If pagino is 0 (this is the root inode allocation) use newino.
1481 * This must work because we've just allocated some.
1484 pagino = be32_to_cpu(agi->agi_newino);
1486 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO);
1488 error = xfs_check_agi_freecount(cur);
1493 * The search algorithm depends on whether we're in the same AG as the
1494 * parent. If so, find the closest available inode to the parent. If
1495 * not, consider the agi hint or find the first free inode in the AG.
1497 if (pag->pag_agno == pagno)
1498 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1500 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1504 offset = xfs_inobt_first_free_inode(&rec);
1505 ASSERT(offset >= 0);
1506 ASSERT(offset < XFS_INODES_PER_CHUNK);
1507 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1508 XFS_INODES_PER_CHUNK) == 0);
1509 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1512 * Modify or remove the finobt record.
1514 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1516 if (rec.ir_freecount)
1517 error = xfs_inobt_update(cur, &rec);
1519 error = xfs_btree_delete(cur, &i);
1524 * The finobt has now been updated appropriately. We haven't updated the
1525 * agi and superblock yet, so we can create an inobt cursor and validate
1526 * the original freecount. If all is well, make the equivalent update to
1527 * the inobt using the finobt record and offset information.
1529 icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO);
1531 error = xfs_check_agi_freecount(icur);
1535 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1540 * Both trees have now been updated. We must update the perag and
1541 * superblock before we can check the freecount for each btree.
1543 be32_add_cpu(&agi->agi_freecount, -1);
1544 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1545 pag->pagi_freecount--;
1547 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1549 error = xfs_check_agi_freecount(icur);
1552 error = xfs_check_agi_freecount(cur);
1556 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1557 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1562 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1564 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1570 struct xfs_trans **tpp,
1571 struct xfs_buf *agibp)
1573 struct xfs_trans *tp = *tpp;
1574 struct xfs_dquot_acct *dqinfo;
	 * Hold on to the agibp across the commit so no other allocation can
1579 * come in and take the free inodes we just allocated for our caller.
1581 xfs_trans_bhold(tp, agibp);
1584 * We want the quota changes to be associated with the next transaction,
	 * NOT this one. So, detach the dqinfo from this and attach it to the
	 * next transaction.
	 */
1588 dqinfo = tp->t_dqinfo;
1589 tp->t_dqinfo = NULL;
1591 error = xfs_trans_roll(&tp);
1593 /* Re-attach the quota info that we detached from prev trx. */
1594 tp->t_dqinfo = dqinfo;
1597 * Join the buffer even on commit error so that the buffer is released
1598 * when the caller cancels the transaction and doesn't have to handle
1599 * this error case specially.
	xfs_trans_bjoin(tp, agibp);
	*tpp = tp;
	return error;
}
1607 xfs_dialloc_good_ag(
1608 struct xfs_perag *pag,
1609 struct xfs_trans *tp,
1614 struct xfs_mount *mp = tp->t_mountp;
1616 xfs_extlen_t longest = 0;
1622 if (!xfs_perag_allows_inodes(pag))
1625 if (!xfs_perag_initialised_agi(pag)) {
1626 error = xfs_ialloc_read_agi(pag, tp, NULL);
1631 if (pag->pagi_freecount)
1636 if (!xfs_perag_initialised_agf(pag)) {
1637 error = xfs_alloc_read_agf(pag, tp, flags, NULL);
1643 * Check that there is enough free space for the file plus a chunk of
1644 * inodes if we need to allocate some. If this is the first pass across
1645 * the AGs, take into account the potential space needed for alignment
1646 * of inode chunks when checking the longest contiguous free space in
1647 * the AG - this prevents us from getting ENOSPC because we have free
	 * space larger than ialloc_blks but alignment constraints prevent us
	 * from using it.
	 *
1651 * If we can't find an AG with space for full alignment slack to be
1652 * taken into account, we must be near ENOSPC in all AGs. Hence we
1653 * don't include alignment for the second pass and so if we fail
	 * allocation due to alignment issues then it is most likely a real
	 * ENOSPC condition.
	 *
1657 * XXX(dgc): this calculation is now bogus thanks to the per-ag
1658 * reservations that xfs_alloc_fix_freelist() now does via
1659 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
1660 * be more than large enough for the check below to succeed, but
1661 * xfs_alloc_space_available() will fail because of the non-zero
1662 * metadata reservation and hence we won't actually be able to allocate
	 * more inodes in this AG. We do so much unnecessary work near ENOSPC
	 * now!
	 */
1666 ineed = M_IGEO(mp)->ialloc_min_blks;
1667 if (flags && ineed > 1)
1668 ineed += M_IGEO(mp)->cluster_align;
	longest = pag->pagf_longest;
	if (!longest)
		longest = pag->pagf_flcount > 0;
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);

	if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
		return false;
	return true;
}
1681 struct xfs_perag *pag,
1682 struct xfs_trans **tpp,
1687 struct xfs_buf *agbp;
	 * Then read in the AGI buffer and recheck with the AGI buffer
	 * lock held.
	 */
1695 error = xfs_ialloc_read_agi(pag, *tpp, &agbp);
1699 if (!pag->pagi_freecount) {
1705 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp);
1710 * We successfully allocated space for an inode cluster in this
	 * AG. Roll the transaction so that we can allocate one of the
	 * free inodes.
	 */
1714 ASSERT(pag->pagi_freecount > 0);
1715 error = xfs_dialloc_roll(tpp, agbp);
1720 /* Allocate an inode in the found AG */
1721 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino);
1727 xfs_trans_brelse(*tpp, agbp);
1732 * Allocate an on-disk inode.
1734 * Mode is used to tell whether the new inode is a directory and hence where to
1735 * locate it. The on-disk inode that is allocated will be returned in @new_ino
 * on success, otherwise an error will be set to indicate the failure (e.g.
 * -ENOSPC).
 */
1741 struct xfs_trans **tpp,
1746 struct xfs_mount *mp = (*tpp)->t_mountp;
1747 xfs_agnumber_t agno;
1749 xfs_agnumber_t start_agno;
1750 struct xfs_perag *pag;
1751 struct xfs_ino_geometry *igeo = M_IGEO(mp);
1752 bool ok_alloc = true;
1753 bool low_space = false;
1755 xfs_ino_t ino = NULLFSINO;
1758 * Directories, symlinks, and regular files frequently allocate at least
1759 * one block, so factor that potential expansion when we examine whether
	 * an AG has enough space for file creation.
	 */
	if (S_ISDIR(mode))
		start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) %
				mp->m_maxagi;
	else {
		start_agno = XFS_INO_TO_AGNO(mp, parent);
		if (start_agno >= mp->m_maxagi)
			start_agno = 0;
	}
1772 * If we have already hit the ceiling of inode blocks then clear
	 * ok_alloc so we scan all available agi structures for a free
	 * inode.
	 *
1776 * Read rough value of mp->m_icount by percpu_counter_read_positive,
1777 * which will sacrifice the preciseness but improve the performance.
1779 if (igeo->maxicount &&
1780 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1781 > igeo->maxicount) {
1786 * If we are near to ENOSPC, we want to prefer allocation from AGs that
1787 * have free inodes in them rather than use up free space allocating new
1788 * inode chunks. Hence we turn off allocation for the first non-blocking
1789 * pass through the AGs if we are near ENOSPC to consume free inodes
1790 * that we can immediately allocate, but then we allow allocation on the
1791 * second pass if we fail to find an AG with free inodes in it.
1793 if (percpu_counter_read_positive(&mp->m_fdblocks) <
1794 mp->m_low_space[XFS_LOWSP_1_PCNT]) {
1800 * Loop until we find an allocation group that either has free inodes
1801 * or in which we can allocate some inodes. Iterate through the
1802 * allocation groups upward, wrapping at the end.
1804 flags = XFS_ALLOC_FLAG_TRYLOCK;
1806 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) {
1807 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) {
1808 error = xfs_dialloc_try_ag(pag, tpp, parent,
1810 if (error != -EAGAIN)
1815 if (xfs_is_shutdown(mp)) {
1816 error = -EFSCORRUPTED;
1821 xfs_perag_rele(pag);
1824 if (ino == NULLFSINO) {
1838 * Free the blocks of an inode chunk. We must consider that the inode chunk
 * might be sparse and only free the regions that are allocated as part of the
 * chunk.
 */
1843 xfs_difree_inode_chunk(
1844 struct xfs_trans *tp,
1845 xfs_agnumber_t agno,
1846 struct xfs_inobt_rec_incore *rec)
1848 struct xfs_mount *mp = tp->t_mountp;
1849 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
1851 int startidx, endidx;
1853 xfs_agblock_t agbno;
1855 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1857 if (!xfs_inobt_issparse(rec->ir_holemask)) {
1858 /* not sparse, calculate extent info directly */
1859 return xfs_free_extent_later(tp,
1860 XFS_AGB_TO_FSB(mp, agno, sagbno),
1861 M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
1862 XFS_AG_RESV_NONE, false);
1865 /* holemask is only 16-bits (fits in an unsigned long) */
1866 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1867 holemask[0] = rec->ir_holemask;
1870 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1871 * holemask and convert the start/end index of each range to an extent.
	 * We start with the start and end index both pointing at the first 0 in
	 * the mask.
	 */
1875 startidx = endidx = find_first_zero_bit(holemask,
1876 XFS_INOBT_HOLEMASK_BITS);
1877 nextbit = startidx + 1;
1878 while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1881 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1884 * If the next zero bit is contiguous, update the end index of
1885 * the current range and continue.
1887 if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1888 nextbit == endidx + 1) {
1894 * nextbit is not contiguous with the current end index. Convert
		 * the current start/end to an extent and add it to the free
		 * list.
		 */
1898 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1899 mp->m_sb.sb_inopblock;
1900 contigblk = ((endidx - startidx + 1) *
1901 XFS_INODES_PER_HOLEMASK_BIT) /
1902 mp->m_sb.sb_inopblock;
1904 ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1905 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1906 error = xfs_free_extent_later(tp,
1907 XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
1908 &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE,
1913 /* reset range to current bit and carry on... */
1914 startidx = endidx = nextbit;
1924 struct xfs_perag *pag,
1925 struct xfs_trans *tp,
1926 struct xfs_buf *agbp,
1928 struct xfs_icluster *xic,
1929 struct xfs_inobt_rec_incore *orec)
1931 struct xfs_mount *mp = pag->pag_mount;
1932 struct xfs_agi *agi = agbp->b_addr;
1933 struct xfs_btree_cur *cur;
1934 struct xfs_inobt_rec_incore rec;
1940 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1941 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1944 * Initialize the cursor.
1946 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO);
1948 error = xfs_check_agi_freecount(cur);
1953 * Look for the entry describing this inode.
1955 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1956 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1960 if (XFS_IS_CORRUPT(mp, i != 1)) {
1961 error = -EFSCORRUPTED;
1964 error = xfs_inobt_get_rec(cur, &rec, &i);
1966 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1970 if (XFS_IS_CORRUPT(mp, i != 1)) {
1971 error = -EFSCORRUPTED;
1975 * Get the offset in the inode chunk.
1977 off = agino - rec.ir_startino;
1978 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1979 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1981 * Mark the inode free & increment the count.
1983 rec.ir_free |= XFS_INOBT_MASK(off);
1987 * When an inode chunk is free, it becomes eligible for removal. Don't
1988 * remove the chunk if the block size is large enough for multiple inode
1989 * chunks (that might not be free).
1991 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
1992 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1993 xic->deleted = true;
1994 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
1996 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1999 * Remove the inode cluster from the AGI B+Tree, adjust the
2000 * AGI and Superblock inode counts, and mark the disk space
2001 * to be freed when the transaction is committed.
2003 ilen = rec.ir_freecount;
2004 be32_add_cpu(&agi->agi_count, -ilen);
2005 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
2006 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
2007 pag->pagi_freecount -= ilen - 1;
2008 pag->pagi_count -= ilen;
2009 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
2010 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
2012 if ((error = xfs_btree_delete(cur, &i))) {
2013 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
2018 error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
2022 xic->deleted = false;
2024 error = xfs_inobt_update(cur, &rec);
2026 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
2032 * Change the inode free counts and log the ag/sb changes.
2034 be32_add_cpu(&agi->agi_freecount, 1);
2035 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2036 pag->pagi_freecount++;
2037 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
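		/*
		 * The chunk stays on disk in this case: only the record's
		 * free mask/count and the AGI/superblock free counts were
		 * bumped for the single inode freed above.
		 */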
2040 error = xfs_check_agi_freecount(cur);
2045 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2049 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2054 * Free an inode in the free inode btree.
2058 struct xfs_perag *pag,
2059 struct xfs_trans *tp,
2060 struct xfs_buf *agbp,
2062 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
2064 struct xfs_mount *mp = pag->pag_mount;
2065 struct xfs_btree_cur *cur;
2066 struct xfs_inobt_rec_incore rec;
2067 int offset = agino - ibtrec->ir_startino;
2071 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO);
2073 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2078 * If the record does not exist in the finobt, we must have just
2079 * freed an inode in a previously fully allocated chunk. If not,
2080 * something is out of sync.
2082 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
2083 error = -EFSCORRUPTED;
2087 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2089 ibtrec->ir_freecount,
2090 ibtrec->ir_free, &i);
2099 * Read and update the existing record. We could just copy the ibtrec
2100 * across here, but that would defeat the purpose of having redundant
2101 * metadata. By making the modifications independently, we can catch
	 * corruptions that we wouldn't see if we just copied from one record
	 * to another.
	 */
2105 error = xfs_inobt_get_rec(cur, &rec, &i);
2108 if (XFS_IS_CORRUPT(mp, i != 1)) {
2109 error = -EFSCORRUPTED;
2113 rec.ir_free |= XFS_INOBT_MASK(offset);
2116 if (XFS_IS_CORRUPT(mp,
2117 rec.ir_free != ibtrec->ir_free ||
2118 rec.ir_freecount != ibtrec->ir_freecount)) {
2119 error = -EFSCORRUPTED;
2124 * The content of inobt records should always match between the inobt
2125 * and finobt. The lifecycle of records in the finobt is different from
2126 * the inobt in that the finobt only tracks records with at least one
2127 * free inode. Hence, if all of the inodes are free and we aren't
2128 * keeping inode chunks permanently on disk, remove the record.
2129 * Otherwise, update the record with the new information.
2131 * Note that we currently can't free chunks when the block size is large
	 * enough for multiple chunks. Leave the finobt record to remain in sync
	 * with the inobt.
	 */
2135 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
2136 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
2137 error = xfs_btree_delete(cur, &i);
2142 error = xfs_inobt_update(cur, &rec);
2148 error = xfs_check_agi_freecount(cur);
2152 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2156 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2161 * Free disk inode. Carefully avoids touching the incore inode, all
2162 * manipulations incore are the caller's responsibility.
2163 * The on-disk inode is not changed by this operation, only the
2164 * btree (free inode mask) is changed.
2168 struct xfs_trans *tp,
2169 struct xfs_perag *pag,
2171 struct xfs_icluster *xic)
2174 xfs_agblock_t agbno; /* block number containing inode */
2175 struct xfs_buf *agbp; /* buffer for allocation group header */
2176 xfs_agino_t agino; /* allocation group inode number */
2177 int error; /* error return value */
2178 struct xfs_mount *mp = tp->t_mountp;
2179 struct xfs_inobt_rec_incore rec;/* btree record */
2182 * Break up inode number into its components.
2184 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
2185 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
2186 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
2190 agino = XFS_INO_TO_AGINO(mp, inode);
2191 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2192 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2193 __func__, (unsigned long long)inode,
2194 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2198 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2199 if (agbno >= mp->m_sb.sb_agblocks) {
2200 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2201 __func__, agbno, mp->m_sb.sb_agblocks);
2206 * Get the allocation group header.
2208 error = xfs_ialloc_read_agi(pag, tp, &agbp);
2210 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2216 * Fix up the inode allocation btree.
2218 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec);
2223 * Fix up the free inode btree.
2225 if (xfs_has_finobt(mp)) {
2226 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec);
2239 struct xfs_perag *pag,
2240 struct xfs_trans *tp,
2242 xfs_agblock_t agbno,
2243 xfs_agblock_t *chunk_agbno,
2244 xfs_agblock_t *offset_agbno,
2247 struct xfs_mount *mp = pag->pag_mount;
2248 struct xfs_inobt_rec_incore rec;
2249 struct xfs_btree_cur *cur;
2250 struct xfs_buf *agbp;
2254 error = xfs_ialloc_read_agi(pag, tp, &agbp);
2257 "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2258 __func__, error, pag->pag_agno);
2263 * Lookup the inode record for the given agino. If the record cannot be
2264 * found, then it's an invalid inode number and we should abort. Once
2265 * we have a record, we need to ensure it contains the inode number
2266 * we are looking up.
2268 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO);
2269 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2272 error = xfs_inobt_get_rec(cur, &rec, &i);
2273 if (!error && i == 0)
2277 xfs_trans_brelse(tp, agbp);
2278 xfs_btree_del_cursor(cur, error);
2282 /* check that the returned record contains the required inode */
2283 if (rec.ir_startino > agino ||
2284 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2287 /* for untrusted inodes check it is allocated first */
2288 if ((flags & XFS_IGET_UNTRUSTED) &&
2289 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2292 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2293 *offset_agbno = agbno - *chunk_agbno;
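	/*
	 * On success the caller gets the chunk's starting block and the
	 * offset of the passed-in agbno within that chunk, which is all
	 * xfs_imap() needs to finish the cluster/buffer arithmetic.
	 */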
2298 * Return the location of the inode in imap, for mapping it into a buffer.
2302 struct xfs_perag *pag,
2303 struct xfs_trans *tp,
2304 xfs_ino_t ino, /* inode to locate */
2305 struct xfs_imap *imap, /* location map structure */
2306 uint flags) /* flags for inode btree lookup */
2308 struct xfs_mount *mp = pag->pag_mount;
2309 xfs_agblock_t agbno; /* block number of inode in the alloc group */
2310 xfs_agino_t agino; /* inode number within alloc group */
2311 xfs_agblock_t chunk_agbno; /* first block in inode chunk */
2312 xfs_agblock_t cluster_agbno; /* first block in inode cluster */
2313 int error; /* error code */
2314 int offset; /* index of inode in its buffer */
2315 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
2317 ASSERT(ino != NULLFSINO);
2320 * Split up the inode number into its parts.
2322 agino = XFS_INO_TO_AGINO(mp, ino);
2323 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2324 if (agbno >= mp->m_sb.sb_agblocks ||
2325 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2329 * Don't output diagnostic information for untrusted inodes
2330 * as they can be invalid without implying corruption.
2332 if (flags & XFS_IGET_UNTRUSTED)
2334 if (agbno >= mp->m_sb.sb_agblocks) {
2336 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2337 __func__, (unsigned long long)agbno,
2338 (unsigned long)mp->m_sb.sb_agblocks);
2340 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2342 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2344 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2352 * For bulkstat and handle lookups, we have an untrusted inode number
2353 * that we have to verify is valid. We cannot do this just by reading
2354 * the inode buffer as it may have been unlinked and removed leaving
2355 * inodes in stale state on disk. Hence we have to do a btree lookup
2356 * in all cases where an untrusted inode number is passed.
2358 if (flags & XFS_IGET_UNTRUSTED) {
2359 error = xfs_imap_lookup(pag, tp, agino, agbno,
2360 &chunk_agbno, &offset_agbno, flags);
2367 * If the inode cluster size is the same as the blocksize or
2368 * smaller we get to the buffer by simple arithmetics.
2370 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2371 offset = XFS_INO_TO_OFFSET(mp, ino);
2372 ASSERT(offset < mp->m_sb.sb_inopblock);
2374 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
2375 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2376 imap->im_boffset = (unsigned short)(offset <<
2377 mp->m_sb.sb_inodelog);
2382 * If the inode chunks are aligned then use simple maths to
2383 * find the location. Otherwise we have to do a btree
2384 * lookup to find the location.
2386 if (M_IGEO(mp)->inoalign_mask) {
2387 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2388 chunk_agbno = agbno - offset_agbno;
2390 error = xfs_imap_lookup(pag, tp, agino, agbno,
2391 &chunk_agbno, &offset_agbno, flags);
2397 ASSERT(agbno >= chunk_agbno);
2398 cluster_agbno = chunk_agbno +
2399 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2400 M_IGEO(mp)->blocks_per_cluster);
2401 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2402 XFS_INO_TO_OFFSET(mp, ino);
2404 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
2405 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2406 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
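	/*
	 * Example: with 512 byte inodes (sb_inodelog == 9), the fourth inode
	 * in its cluster buffer (offset == 3) maps to im_boffset == 1536
	 * bytes from the start of the buffer.
	 */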
2409 * If the inode number maps to a block outside the bounds
2410 * of the file system then return NULL rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
2414 if ((imap->im_blkno + imap->im_len) >
2415 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2417 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2418 __func__, (unsigned long long) imap->im_blkno,
2419 (unsigned long long) imap->im_len,
2420 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2421 return -EINVAL;
2423 return 0;
2427 * Log specified fields for the ag hdr (inode section). The growth of the agi
2428 * structure over time requires that we interpret the buffer as two logical
2429 * regions delineated by the end of the unlinked list. This is due to the size
2430 * of the hash table and its location in the middle of the agi.
2432 * For example, a request to log a field before agi_unlinked and a field after
2433 * agi_unlinked could cause us to log the entire hash table and use an excessive
2434 * amount of log space. To avoid this behavior, log the region up through
2435 * agi_unlinked in one call and the region after agi_unlinked through the end of
2436 * the structure in another.
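/*
 * For illustration: a single call such as
 *
 *	xfs_ialloc_log_agi(tp, bp, XFS_AGI_FREECOUNT | XFS_AGI_FREE_ROOT);
 *
 * therefore results in two xfs_trans_log_buf() ranges, one in the region
 * at or before agi_unlinked and one in the region after it, rather than
 * one range spanning the whole unlinked hash table.
 */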
2438 void
2439 xfs_ialloc_log_agi(
2440 struct xfs_trans *tp,
2441 struct xfs_buf *bp,
2442 uint32_t fields)
2444 int first; /* first byte number */
2445 int last; /* last byte number */
2446 static const short offsets[] = { /* field starting offsets */
2447 /* keep in sync with bit definitions */
2448 offsetof(xfs_agi_t, agi_magicnum),
2449 offsetof(xfs_agi_t, agi_versionnum),
2450 offsetof(xfs_agi_t, agi_seqno),
2451 offsetof(xfs_agi_t, agi_length),
2452 offsetof(xfs_agi_t, agi_count),
2453 offsetof(xfs_agi_t, agi_root),
2454 offsetof(xfs_agi_t, agi_level),
2455 offsetof(xfs_agi_t, agi_freecount),
2456 offsetof(xfs_agi_t, agi_newino),
2457 offsetof(xfs_agi_t, agi_dirino),
2458 offsetof(xfs_agi_t, agi_unlinked),
2459 offsetof(xfs_agi_t, agi_free_root),
2460 offsetof(xfs_agi_t, agi_free_level),
2461 offsetof(xfs_agi_t, agi_iblocks),
2465 struct xfs_agi *agi = bp->b_addr;
2467 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2471 * Compute byte offsets for the first and last fields in the first
2472 * region and log the agi buffer. This only logs up through
2475 if (fields & XFS_AGI_ALL_BITS_R1) {
2476 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2477 &first, &last);
2478 xfs_trans_log_buf(tp, bp, first, last);
2482 * Mask off the bits in the first region and calculate the first and
2483 * last field offsets for any bits in the second region.
2485 fields &= ~XFS_AGI_ALL_BITS_R1;
2486 if (fields) {
2487 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2488 &first, &last);
2489 xfs_trans_log_buf(tp, bp, first, last);
2493 static xfs_failaddr_t
2494 xfs_agi_verify(
2495 struct xfs_buf *bp)
2497 struct xfs_mount *mp = bp->b_mount;
2498 struct xfs_agi *agi = bp->b_addr;
2500 uint32_t agi_seqno = be32_to_cpu(agi->agi_seqno);
2501 uint32_t agi_length = be32_to_cpu(agi->agi_length);
2504 if (xfs_has_crc(mp)) {
2505 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2506 return __this_address;
2507 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
2508 return __this_address;
2512 * Validate the magic number of the agi block.
2514 if (!xfs_verify_magic(bp, agi->agi_magicnum))
2515 return __this_address;
2516 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2517 return __this_address;
2519 fa = xfs_validate_ag_length(bp, agi_seqno, agi_length);
2523 if (be32_to_cpu(agi->agi_level) < 1 ||
2524 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
2525 return __this_address;
2527 if (xfs_has_finobt(mp) &&
2528 (be32_to_cpu(agi->agi_free_level) < 1 ||
2529 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
2530 return __this_address;
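/*
 * Each of the XFS_AGI_UNLINKED_BUCKETS (64) entries below heads an on-disk
 * list of unlinked-but-still-referenced inodes; NULLAGINO marks an empty
 * bucket, so anything else must pass inode number verification.
 */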
2532 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
2533 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
2535 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
2536 return __this_address;
2543 xfs_agi_read_verify(
2546 struct xfs_mount *mp = bp->b_mount;
2549 if (xfs_has_crc(mp) &&
2550 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2551 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2553 fa = xfs_agi_verify(bp);
2554 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
2555 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2560 xfs_agi_write_verify(
2563 struct xfs_mount *mp = bp->b_mount;
2564 struct xfs_buf_log_item *bip = bp->b_log_item;
2565 struct xfs_agi *agi = bp->b_addr;
2568 fa = xfs_agi_verify(bp);
2570 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2574 if (!xfs_has_crc(mp))
2578 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2579 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
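/*
 * Note that the LSN is stamped into the AGI before the CRC is recomputed
 * above, so the on-disk checksum always covers the updated LSN.
 */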
2582 const struct xfs_buf_ops xfs_agi_buf_ops = {
2584 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
2585 .verify_read = xfs_agi_read_verify,
2586 .verify_write = xfs_agi_write_verify,
2587 .verify_struct = xfs_agi_verify,
2591 * Read in the allocation group header (inode allocation section)
2593 int
2594 xfs_read_agi(
2595 struct xfs_perag *pag,
2596 struct xfs_trans *tp,
2597 struct xfs_buf **agibpp)
2599 struct xfs_mount *mp = pag->pag_mount;
2602 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno);
2604 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2605 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
2606 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops);
2610 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF);
2612 xfs_buf_set_ref(*agibpp, XFS_AGI_REF);
2617 * Read in the agi and initialise the per-ag data. If the caller supplies a
2618 * @agibpp, return the locked AGI buffer to them, otherwise release it.
2621 xfs_ialloc_read_agi(
2622 struct xfs_perag *pag,
2623 struct xfs_trans *tp,
2624 struct xfs_buf **agibpp)
2626 struct xfs_buf *agibp;
2627 struct xfs_agi *agi;
2630 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
2632 error = xfs_read_agi(pag, tp, &agibp);
2636 agi = agibp->b_addr;
2637 if (!xfs_perag_initialised_agi(pag)) {
2638 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2639 pag->pagi_count = be32_to_cpu(agi->agi_count);
2640 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
2644 * It's possible for these to be out of sync if
2645 * we are in the middle of a forced shutdown.
2647 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2648 xfs_is_shutdown(pag->pag_mount));
2649 if (agibpp)
2650 *agibpp = agibp;
2651 else
2652 xfs_trans_brelse(tp, agibp);
2653 return 0;
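/*
 * Usage sketch (assumed caller pattern, for illustration only): a
 * transaction that needs the AGI locked for update would typically do
 *
 *	struct xfs_buf	*agibp;
 *
 *	error = xfs_ialloc_read_agi(pag, tp, &agibp);
 *	if (error)
 *		return error;
 *	xfs_ialloc_log_agi(tp, agibp, XFS_AGI_FREECOUNT);
 *
 * after updating agi_freecount in the buffer, whereas passing a NULL
 * @agibpp merely initialises the per-AG counters and releases the buffer,
 * as the branch above shows.
 */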
2656 /* How many inodes are backed by inode clusters ondisk? */
2658 xfs_ialloc_count_ondisk(
2659 struct xfs_btree_cur *cur,
2660 xfs_agino_t low,
2661 xfs_agino_t high,
2662 unsigned int *allocated)
2664 struct xfs_inobt_rec_incore irec;
2665 unsigned int ret = 0;
2669 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
2673 while (has_record) {
2674 unsigned int i, hole_idx;
2676 error = xfs_inobt_get_rec(cur, &irec, &has_record);
2677 if (error)
2678 return error;
2679 if (irec.ir_startino > high)
2680 break;
2682 for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
2683 if (irec.ir_startino + i < low)
2684 continue;
2685 if (irec.ir_startino + i > high)
2686 break;
2688 hole_idx = i / XFS_INODES_PER_HOLEMASK_BIT;
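/*
 * With 64-inode chunks and a 16-bit holemask, each holemask bit stands
 * for XFS_INODES_PER_HOLEMASK_BIT (4) inodes; a set bit marks a hole, so
 * only inodes whose bit is clear are backed by on-disk clusters and
 * counted here.
 */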
2689 if (!(irec.ir_holemask & (1U << hole_idx)))
2690 ret++;
2693 error = xfs_btree_increment(cur, 0, &has_record);
2694 if (error)
2695 return error;
2698 *allocated = ret;
2699 return 0;
2702 /* Is there an inode record covering a given extent? */
2704 xfs_ialloc_has_inodes_at_extent(
2705 struct xfs_btree_cur *cur,
2708 enum xbtree_recpacking *outcome)
2711 xfs_agino_t last_agino;
2712 unsigned int allocated;
2715 agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
2716 last_agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
2718 error = xfs_ialloc_count_ondisk(cur, agino, last_agino, &allocated);
2719 if (error)
2720 return error;
2722 if (allocated == 0)
2723 *outcome = XBTREE_RECPACKING_EMPTY;
2724 else if (allocated == last_agino - agino + 1)
2725 *outcome = XBTREE_RECPACKING_FULL;
2727 *outcome = XBTREE_RECPACKING_SPARSE;
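/*
 * For illustration: a 4-block extent on a filesystem with 8 inodes per
 * block spans 32 aginos, so allocated == 32 yields XBTREE_RECPACKING_FULL,
 * allocated == 0 yields XBTREE_RECPACKING_EMPTY, and anything in between
 * is reported as XBTREE_RECPACKING_SPARSE.
 */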
2731 struct xfs_ialloc_count_inodes {
2732 xfs_agino_t count;
2733 xfs_agino_t freecount;
2736 /* Record inode counts across all inobt records. */
2738 xfs_ialloc_count_inodes_rec(
2739 struct xfs_btree_cur *cur,
2740 const union xfs_btree_rec *rec,
2741 void *priv)
2743 struct xfs_inobt_rec_incore irec;
2744 struct xfs_ialloc_count_inodes *ci = priv;
2747 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
2748 fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec);
2749 if (fa)
2750 return xfs_inobt_complain_bad_rec(cur, fa, &irec);
2752 ci->count += irec.ir_count;
2753 ci->freecount += irec.ir_freecount;
2758 /* Count allocated and free inodes under an inobt. */
2760 xfs_ialloc_count_inodes(
2761 struct xfs_btree_cur *cur,
2762 xfs_agino_t *count,
2763 xfs_agino_t *freecount)
2765 struct xfs_ialloc_count_inodes ci = {0};
2768 ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
2769 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
2770 if (error)
2771 return error;
2773 *count = ci.count;
2774 *freecount = ci.freecount;
2779 * Initialize inode-related geometry information.
2781 * Compute the inode btree min and max levels and set maxicount.
2783 * Set the inode cluster size. This may still be overridden by the file
2784 * system block size if it is larger than the chosen cluster size.
2786 * For v5 filesystems, scale the cluster size with the inode size to keep a
2787 * constant ratio of inodes per cluster buffer, but only if mkfs has set the
2788 * inode alignment value appropriately for larger cluster sizes.
2790 * Then compute the inode cluster alignment information.
2793 xfs_ialloc_setup_geometry(
2794 struct xfs_mount *mp)
2796 struct xfs_sb *sbp = &mp->m_sb;
2797 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2798 uint64_t icount;
2799 uint inodes;
2801 igeo->new_diflags2 = 0;
2802 if (xfs_has_bigtime(mp))
2803 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;
2804 if (xfs_has_large_extent_counts(mp))
2805 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64;
2807 /* Compute inode btree geometry. */
2808 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
2809 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
2810 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
2811 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
2812 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
2814 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
2815 sbp->sb_inopblock);
2816 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
2818 if (sbp->sb_spino_align)
2819 igeo->ialloc_min_blks = sbp->sb_spino_align;
2821 igeo->ialloc_min_blks = igeo->ialloc_blks;
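/*
 * For illustration (assumed geometry): with 4096-byte blocks and 512-byte
 * inodes, sb_inopblog is 3, so a full chunk of ialloc_inos = 64 inodes
 * occupies ialloc_blks = 64 >> 3 = 8 blocks, while sparse allocations can
 * be as small as sb_spino_align blocks.
 */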
2823 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
2824 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2825 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
2826 inodes);
2827 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk());
2830 * Set the maximum inode count for this filesystem, being careful not
2831 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
2832 * users should never get here because such values fail sb verification,
2833 * but certain users (xfs_db) need to be usable even with corrupt metadata.
2835 if (sbp->sb_imax_pct && igeo->ialloc_blks) {
2837 * Make sure the maximum inode count is a multiple
2838 * of the units we allocate inodes in.
2840 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
2841 do_div(icount, 100);
2842 do_div(icount, igeo->ialloc_blks);
2843 igeo->maxicount = XFS_FSB_TO_INO(mp,
2844 icount * igeo->ialloc_blks);
2846 igeo->maxicount = 0;
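/*
 * Worked example (made-up numbers): with sb_dblocks = 1,000,000,
 * sb_imax_pct = 25 and ialloc_blks = 8, icount becomes 250,000 and then
 * 31,250 after the two divisions, so maxicount = XFS_FSB_TO_INO(mp,
 * 31,250 * 8), i.e. 2,000,000 inodes at 8 inodes per block.
 */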
2850 * Compute the desired size of an inode cluster buffer, which
2851 * starts at 8K and (on v5 filesystems) scales up with larger inode
2852 * sizes.
2854 * Preserve the desired inode cluster size because the sparse inodes
2855 * feature uses that desired size (not the actual size) to compute the
2856 * sparse inode alignment. The mount code validates this value, so we
2857 * cannot change the behavior.
2859 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
2860 if (xfs_has_v3inodes(mp)) {
2861 int new_size = igeo->inode_cluster_size_raw;
2863 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
2864 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
2865 igeo->inode_cluster_size_raw = new_size;
2868 /* Calculate inode cluster ratios. */
2869 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
2870 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
2871 igeo->inode_cluster_size_raw);
2873 igeo->blocks_per_cluster = 1;
2874 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
2875 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
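/*
 * For illustration (assumed defaults): the raw cluster size starts at
 * XFS_INODE_BIG_CLUSTER_SIZE (8k); with 512-byte v3 inodes it scales to
 * 8192 * (512 / 256) = 16k if sb_inoalignmt permits, which on 4k blocks
 * yields blocks_per_cluster = 4 and inodes_per_cluster = 32.
 */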
2877 /* Calculate inode cluster alignment. */
2878 if (xfs_has_align(mp) &&
2879 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
2880 igeo->cluster_align = mp->m_sb.sb_inoalignmt;
2882 igeo->cluster_align = 1;
2883 igeo->inoalign_mask = igeo->cluster_align - 1;
2884 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
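/*
 * The inoalign_mask computed here is what xfs_imap() above uses to derive
 * chunk_agbno with plain arithmetic instead of an inobt lookup when inode
 * chunks are aligned.
 */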
2887 * If we are using stripe alignment, check whether
2888 * the stripe unit is a multiple of the inode alignment.
2890 if (mp->m_dalign && igeo->inoalign_mask &&
2891 !(mp->m_dalign & igeo->inoalign_mask))
2892 igeo->ialloc_align = mp->m_dalign;
2894 igeo->ialloc_align = 0;
2897 /* Compute the location of the root directory inode that is laid out by mkfs. */
2898 xfs_ino_t
2899 xfs_ialloc_calc_rootino(
2900 struct xfs_mount *mp,
2901 int sunit)
2903 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2904 xfs_agblock_t first_bno;
2907 * Pre-calculate the geometry of AG 0. We know what it looks like
2908 * because libxfs knows how to create allocation groups now.
2910 * first_bno is the first block in which mkfs could possibly have
2911 * allocated the root directory inode, once we factor in the metadata
2912 * that mkfs formats before it. Namely, the four AG headers...
2914 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
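/*
 * The four sector-sized AG headers counted here are the superblock, AGF,
 * AGI and AGFL, hence 4 * sb_sectsize rounded up to whole filesystem
 * blocks.
 */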
2916 /* ...the two free space btree roots... */
2917 first_bno += 2;
2919 /* ...the inode btree root... */
2920 first_bno += 1;
2922 /* ...the initial AGFL... */
2923 first_bno += xfs_alloc_min_freelist(mp, NULL);
2925 /* ...the free inode btree root... */
2926 if (xfs_has_finobt(mp))
2927 first_bno++;
2929 /* ...the reverse mapping btree root... */
2930 if (xfs_has_rmapbt(mp))
2931 first_bno++;
2933 /* ...the reference count btree... */
2934 if (xfs_has_reflink(mp))
2935 first_bno++;
2938 * ...and the log, if it is allocated in the first allocation group.
2940 * This can happen with filesystems that only have a single
2941 * allocation group, or very odd geometries created by old mkfs
2942 * versions on very small filesystems.
2944 if (xfs_ag_contains_log(mp, 0))
2945 first_bno += mp->m_sb.sb_logblocks;
2948 * Now round first_bno up to whatever allocation alignment is given
2949 * by the filesystem or was passed in.
2951 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0)
2952 first_bno = roundup(first_bno, sunit);
2953 else if (xfs_has_align(mp) &&
2954 mp->m_sb.sb_inoalignmt > 1)
2955 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
2957 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
2961 * Ensure there are no sparse inode clusters that cross the new EOAG.
2963 * This is a no-op for non-spinode filesystems since clusters are always fully
2964 * allocated and checking the bnobt suffices. However, a spinode filesystem
2965 * could have a record where the upper inodes are free blocks. If those blocks
2966 * were removed from the filesystem, the inode record would extend beyond EOAG,
2967 * which will be flagged as corruption.
2970 xfs_ialloc_check_shrink(
2971 struct xfs_perag *pag,
2972 struct xfs_trans *tp,
2973 struct xfs_buf *agibp,
2974 xfs_agblock_t new_length)
2976 struct xfs_inobt_rec_incore rec;
2977 struct xfs_btree_cur *cur;
2982 if (!xfs_has_sparseinodes(pag->pag_mount))
2983 return 0;
2985 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO);
2987 /* Look up the inobt record that would correspond to the new EOFS. */
2988 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length);
2989 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
2993 error = xfs_inobt_get_rec(cur, &rec, &has);
2997 if (!has) {
2998 error = -EFSCORRUPTED;
2999 goto out;
3002 /* If the record covers inodes that would be beyond EOFS, bail out. */
3003 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
3004 error = -ENOSPC;
3005 goto out;
3007 out:
3008 xfs_btree_del_cursor(cur, error);
3009 return error;