// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
void
xfs_uuid_table_free(void)
{
        if (xfs_uuid_table_size == 0)
                return;
        kmem_free(xfs_uuid_table);
        xfs_uuid_table = NULL;
        xfs_uuid_table_size = 0;
}
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
        struct xfs_mount *mp)
{
        uuid_t *uuid = &mp->m_sb.sb_uuid;
        int hole, i;

        /* Publish UUID in struct super_block */
        uuid_copy(&mp->m_super->s_uuid, uuid);

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return 0;

        if (uuid_is_null(uuid)) {
                xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
                if (uuid_equal(uuid, &xfs_uuid_table[i]))
                        goto out_duplicate;
        }

        if (hole < 0) {
                xfs_uuid_table = krealloc(xfs_uuid_table,
                        (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
                        GFP_KERNEL | __GFP_NOFAIL);
                hole = xfs_uuid_table_size++;
        }
        xfs_uuid_table[hole] = *uuid;
        mutex_unlock(&xfs_uuid_table_mutex);

        return 0;

 out_duplicate:
        mutex_unlock(&xfs_uuid_table_mutex);
        xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
        return -EINVAL;
}
STATIC void
xfs_uuid_unmount(
        struct xfs_mount *mp)
{
        uuid_t *uuid = &mp->m_sb.sb_uuid;
        int i;

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return;

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
                memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
                break;
        }
        ASSERT(i < xfs_uuid_table_size);
        mutex_unlock(&xfs_uuid_table_mutex);
}
STATIC void
__xfs_free_perag(
        struct rcu_head *head)
{
        struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

        ASSERT(atomic_read(&pag->pag_ref) == 0);
        kmem_free(pag);
}
/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
        struct xfs_mount *mp)
{
        xfs_agnumber_t agno;
        struct xfs_perag *pag;

        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                spin_lock(&mp->m_perag_lock);
                pag = radix_tree_delete(&mp->m_perag_tree, agno);
                spin_unlock(&mp->m_perag_lock);
                ASSERT(pag);
                ASSERT(atomic_read(&pag->pag_ref) == 0);
                xfs_iunlink_destroy(pag);
                xfs_buf_hash_destroy(pag);
                call_rcu(&pag->rcu_head, __xfs_free_perag);
        }
}
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
        xfs_sb_t *sbp,
        uint64_t nblocks)
{
        ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
        ASSERT(sbp->sb_blocklog >= BBSHIFT);

        /* Limited by ULONG_MAX of page cache index */
        if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
                return -EFBIG;
        return 0;
}
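/*
 * Worked example (illustrative, not part of the original source): with
 * 4096-byte pages (PAGE_SHIFT == 12) and 512-byte blocks (sb_blocklog == 9)
 * eight blocks share a page, so the shift above is 3 and the filesystem may
 * hold up to eight blocks per ULONG_MAX page cache indices. With blocksize
 * equal to page size the shift is 0 and nblocks is limited to ULONG_MAX
 * directly.
 */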
int
xfs_initialize_perag(
        xfs_mount_t *mp,
        xfs_agnumber_t agcount,
        xfs_agnumber_t *maxagi)
{
        xfs_agnumber_t index;
        xfs_agnumber_t first_initialised = NULLAGNUMBER;
        xfs_perag_t *pag;
        int error = -ENOMEM;

        /*
         * Walk the current per-ag tree so we don't try to initialise AGs
         * that already exist (growfs case). Allocate and insert all the
         * AGs we don't find ready for initialisation.
         */
        for (index = 0; index < agcount; index++) {
                pag = xfs_perag_get(mp, index);
                if (pag) {
                        xfs_perag_put(pag);
                        continue;
                }

                pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
                if (!pag)
                        goto out_unwind_new_pags;
                pag->pag_agno = index;
                pag->pag_mount = mp;
                spin_lock_init(&pag->pag_ici_lock);
                INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);

                error = xfs_buf_hash_init(pag);
                if (error)
                        goto out_free_pag;
                init_waitqueue_head(&pag->pagb_wait);
                spin_lock_init(&pag->pagb_lock);
                pag->pagb_count = 0;
                pag->pagb_tree = RB_ROOT;

                error = radix_tree_preload(GFP_NOFS);
                if (error)
                        goto out_hash_destroy;

                spin_lock(&mp->m_perag_lock);
                if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
                        WARN_ON_ONCE(1);
                        spin_unlock(&mp->m_perag_lock);
                        radix_tree_preload_end();
                        error = -EEXIST;
                        goto out_hash_destroy;
                }
                spin_unlock(&mp->m_perag_lock);
                radix_tree_preload_end();
                /* first new pag is fully initialized */
                if (first_initialised == NULLAGNUMBER)
                        first_initialised = index;
                error = xfs_iunlink_init(pag);
                if (error)
                        goto out_hash_destroy;
                spin_lock_init(&pag->pag_state_lock);
        }

        index = xfs_set_inode_alloc(mp, agcount);

        if (maxagi)
                *maxagi = index;

        mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
        return 0;

out_hash_destroy:
        xfs_buf_hash_destroy(pag);
out_free_pag:
        kmem_free(pag);
out_unwind_new_pags:
        /* unwind any prior newly initialized pags */
        for (index = first_initialised; index < agcount; index++) {
                pag = radix_tree_delete(&mp->m_perag_tree, index);
                if (!pag)
                        break;
                xfs_buf_hash_destroy(pag);
                xfs_iunlink_destroy(pag);
                kmem_free(pag);
        }
        return error;
}
/*
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
        struct xfs_mount *mp,
        int flags)
{
        unsigned int sector_size;
        struct xfs_buf *bp;
        struct xfs_sb *sbp = &mp->m_sb;
        int error;
        int loud = !(flags & XFS_MFSI_QUIET);
        const struct xfs_buf_ops *buf_ops;

        ASSERT(mp->m_sb_bp == NULL);
        ASSERT(mp->m_ddev_targp != NULL);

        /*
         * For the initial read, we must guess at the sector
         * size based on the block device. It's enough to
         * get the sb_sectsize out of the superblock and
         * then reread with the proper length.
         * We don't verify it yet, because it may not be complete.
         */
        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
        buf_ops = NULL;

        /*
         * Allocate a (locked) buffer to hold the superblock. This will be kept
         * around at all times to optimize access to the superblock. Therefore,
         * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
         * elevated.
         */
reread:
        error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
                                      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
                                      buf_ops);
        if (error) {
                if (loud)
                        xfs_warn(mp, "SB validate failed with error %d.", error);
                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        /*
         * Initialize the mount structure from the superblock.
         */
        xfs_sb_from_disk(sbp, bp->b_addr);

        /*
         * If we haven't validated the superblock, do so now before we try
         * to check the sector size and reread the superblock appropriately.
         */
        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
                if (loud)
                        xfs_warn(mp, "Invalid superblock magic number");
                error = -EINVAL;
                goto release_buf;
        }

        /*
         * We must be able to do sector-sized and sector-aligned IO.
         */
        if (sector_size > sbp->sb_sectsize) {
                if (loud)
                        xfs_warn(mp, "device supports %u byte sectors (not %u)",
                                sector_size, sbp->sb_sectsize);
                error = -ENOSYS;
                goto release_buf;
        }

        if (buf_ops == NULL) {
                /*
                 * Re-read the superblock so the buffer is correctly sized,
                 * and properly verified.
                 */
                xfs_buf_relse(bp);
                sector_size = sbp->sb_sectsize;
                buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
                goto reread;
        }

        xfs_reinit_percpu_counters(mp);

        /* no need to be quiet anymore, so reset the buf ops */
        bp->b_ops = &xfs_sb_buf_ops;

        mp->m_sb_bp = bp;
        xfs_buf_unlock(bp);
        return 0;

release_buf:
        xfs_buf_relse(bp);
        return error;
}
/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before. Returns true to update the sb, false otherwise.
 */
static int
xfs_check_new_dalign(
        struct xfs_mount *mp,
        int new_dalign,
        bool *update_sb)
{
        struct xfs_sb *sbp = &mp->m_sb;
        xfs_ino_t calc_ino;

        calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
        trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

        if (sbp->sb_rootino == calc_ino) {
                *update_sb = true;
                return 0;
        }

        xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

        /*
         * XXX: Next time we add a new incompat feature, this should start
         * returning -EINVAL to fail the mount. Until then, spit out a warning
         * that we're ignoring the administrator's instructions.
         */
        xfs_warn(mp, "Skipping superblock stripe alignment update.");
        *update_sb = false;
        return 0;
}
/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects. This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
        struct xfs_mount *mp)
{
        if (mp->m_dalign == 0)
                return 0;

        /*
         * If stripe unit and stripe width are not multiples
         * of the fs blocksize turn off alignment.
         */
        if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
            (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
                xfs_warn(mp,
        "alignment check failed: sunit/swidth vs. blocksize(%d)",
                        mp->m_sb.sb_blocksize);
                return -EINVAL;
        } else {
                /*
                 * Convert the stripe unit and width to FSBs.
                 */
                mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
                if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
                        xfs_warn(mp,
                "alignment check failed: sunit/swidth vs. agsize(%d)",
                                mp->m_sb.sb_agblocks);
                        return -EINVAL;
                } else if (mp->m_dalign) {
                        mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
                } else {
                        xfs_warn(mp,
                "alignment check failed: sunit(%d) less than bsize(%d)",
                                mp->m_dalign, mp->m_sb.sb_blocksize);
                        return -EINVAL;
                }
        }

        if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
                xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
                return -EINVAL;
        }

        return 0;
}
/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
        struct xfs_mount *mp)
{
        struct xfs_sb *sbp = &mp->m_sb;

        if (mp->m_dalign) {
                bool update_sb;
                int error;

                if (sbp->sb_unit == mp->m_dalign &&
                    sbp->sb_width == mp->m_swidth)
                        return 0;

                error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
                if (error || !update_sb)
                        return error;

                sbp->sb_unit = mp->m_dalign;
                sbp->sb_width = mp->m_swidth;
                mp->m_update_sb = true;
        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
                   xfs_sb_version_hasdalign(&mp->m_sb)) {
                mp->m_dalign = sbp->sb_unit;
                mp->m_swidth = sbp->sb_width;
        }

        return 0;
}
/*
 * Precalculate the low space thresholds for dynamic speculative
 * preallocation.
 */
void
xfs_set_low_space_thresholds(
        struct xfs_mount *mp)
{
        int i;

        for (i = 0; i < XFS_LOWSP_MAX; i++) {
                uint64_t space = mp->m_sb.sb_dblocks;

                do_div(space, 100);
                mp->m_low_space[i] = space * (i + 1);
        }
}
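/*
 * Worked example (illustrative, not part of the original source): with
 * sb_dblocks == 1,000,000 the loop stores thresholds of 10,000 through
 * 50,000 blocks in m_low_space[0..4], i.e. 1% to 5% of the data device.
 * Speculative preallocation is progressively scaled back as free space
 * falls below each step.
 */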
/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
        struct xfs_mount *mp)
{
        struct xfs_buf *bp;
        xfs_daddr_t d;
        int error;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
                xfs_warn(mp, "filesystem size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_ddev_targp,
                                      d - XFS_FSS_TO_BB(mp, 1),
                                      XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "last sector read failed");
                return error;
        }
        xfs_buf_relse(bp);

        if (mp->m_logdev_targp == mp->m_ddev_targp)
                return 0;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                xfs_warn(mp, "log size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_logdev_targp,
                                      d - XFS_FSB_TO_BB(mp, 1),
                                      XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "log device read failed");
                return error;
        }
        xfs_buf_relse(bp);
        return 0;
}
/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
        struct xfs_mount *mp)
{
        mp->m_qflags = 0;

        /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
        if (mp->m_sb.sb_qflags == 0)
                return 0;
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = 0;
        spin_unlock(&mp->m_sb_lock);

        if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;

        return xfs_sync_sb(mp, false);
}
uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
        uint64_t resblks;

        /*
         * We default to 5% or 8192 fsbs of space reserved, whichever is
         * smaller. This is intended to cover concurrent allocation
         * transactions when we initially hit enospc. These each require a 4
         * block reservation. Hence by default we cover roughly 2000 concurrent
         * allocation reservations.
         */
        resblks = mp->m_sb.sb_dblocks;
        do_div(resblks, 20);
        resblks = min_t(uint64_t, resblks, 8192);
        return resblks;
}
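/*
 * Worked example (illustrative, not part of the original source): a 1 TiB
 * data device with 4096-byte blocks has sb_dblocks == 268,435,456, so the
 * 5% figure (do_div by 20) is ~13.4 million blocks and the min_t() clamp
 * wins, capping the pool at 8192 blocks (32 MiB). Only filesystems smaller
 * than 163,840 blocks (~640 MiB at 4k) end up with the uncapped 5%.
 */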
/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
        struct xfs_mount *mp)
{
        /*
         * The AG0 superblock verifier rejects in-progress filesystems,
         * so we should never see the flag set this far into mounting.
         */
        if (mp->m_sb.sb_inprogress) {
                xfs_err(mp, "sb_inprogress set after log recovery??");
                WARN_ON(1);
                return -EFSCORRUPTED;
        }

        /*
         * Now the log is mounted, we know if it was an unclean shutdown or
         * not. If it was, the first phase of recovery has completed by now,
         * so we have consistent AG blocks on disk. We have not recovered EFIs
         * yet, but they are recovered transactionally in the second recovery
         * phase later.
         *
         * If the log was clean when we mounted, we can check the summary
         * counters. If any of them are obviously incorrect, we can recompute
         * them from the AGF headers in the next step.
         */
        if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
            (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
             !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
             mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
                xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

        /*
         * We can safely re-initialise incore superblock counters from the
         * per-ag data. These may not be correct if the filesystem was not
         * cleanly unmounted, so we waited for recovery to finish before doing
         * this.
         *
         * If the filesystem was cleanly unmounted or the previous check did
         * not flag anything weird, then we can trust the values in the
         * superblock to be correct and we don't need to do anything here.
         * Otherwise, recalculate the summary counters.
         */
        if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
            !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
                return 0;

        return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}
/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background. This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
        struct xfs_mount *mp)
{
        xfs_log_force(mp, XFS_LOG_SYNC);
        xfs_extent_busy_wait_all(mp);
        flush_workqueue(xfs_discard_wq);

        mp->m_flags |= XFS_MOUNT_UNMOUNTING;

        xfs_ail_push_all_sync(mp->m_ail);
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp);
        xfs_health_unmount(mp);
}
/*
 * This function does the following on an initial mount of a file system:
 *      - reads the superblock from disk and init the mount struct
 *      - if we're a 32-bit kernel, do a size check on the superblock
 *              so we don't mount terabyte filesystems
 *      - init mount struct realtime fields
 *      - allocate inode hash table for fs
 *      - init directory manager
 *      - perform recovery and init the log manager
 */
int
xfs_mountfs(
        struct xfs_mount *mp)
{
        struct xfs_sb *sbp = &(mp->m_sb);
        struct xfs_inode *rip;
        struct xfs_ino_geometry *igeo = M_IGEO(mp);
        uint64_t resblks;
        uint quotamount = 0;
        uint quotaflags = 0;
        int error = 0;

        xfs_sb_mount_common(mp, sbp);

        /*
         * Check for mismatched features2 values. Older kernels read & wrote
         * into the wrong sb offset for sb_features2 on some platforms due to
         * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
         * which made older superblock reading/writing routines swap it as a
         * 64-bit value.
         *
         * For backwards compatibility, we make both slots equal.
         *
         * If we detect a mismatched field, we OR the set bits into the existing
         * features2 field in case it has already been modified; we don't want
         * to lose any features. We then update the bad location with the ORed
         * value so that older kernels will see any features2 flags. The
         * superblock writeback code ensures the new sb_features2 is copied to
         * sb_bad_features2 before it is logged or written to disk.
         */
        if (xfs_sb_has_mismatched_features2(sbp)) {
                xfs_warn(mp, "correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
                mp->m_update_sb = true;

                /*
                 * Re-check for ATTR2 in case it was found in bad_features2
                 * slot.
                 */
                if (xfs_sb_version_hasattr2(&mp->m_sb) &&
                    !(mp->m_flags & XFS_MOUNT_NOATTR2))
                        mp->m_flags |= XFS_MOUNT_ATTR2;
        }

        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_sb_version_removeattr2(&mp->m_sb);
                mp->m_update_sb = true;

                /* update sb_versionnum for the clearing of the morebits */
                if (!sbp->sb_features2)
                        mp->m_update_sb = true;
        }

        /* always use v2 inodes by default now */
        if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
                mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
                mp->m_update_sb = true;
        }

        /*
         * If we were given new sunit/swidth options, do some basic validation
         * checks and convert the incore dalign and swidth values to the
         * same units (FSB) that everything else uses. This /must/ happen
         * before computing the inode geometry.
         */
        error = xfs_validate_new_dalign(mp);
        if (error)
                goto out;

        xfs_alloc_compute_maxlevels(mp);
        xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
        xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
        xfs_ialloc_setup_geometry(mp);
        xfs_rmapbt_compute_maxlevels(mp);
        xfs_refcountbt_compute_maxlevels(mp);

        /*
         * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
         * is NOT aligned turn off m_dalign since allocator alignment is within
         * an ag, therefore ag has to be aligned at stripe boundary. Note that
         * we must compute the free space and rmap btree geometry before doing
         * this.
         */
        error = xfs_update_alignment(mp);
        if (error)
                goto out;

        /* enable fail_at_unmount as default */
        mp->m_fail_unmount = true;

        error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
                               NULL, mp->m_super->s_id);
        if (error)
                goto out;

        error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
                               &mp->m_kobj, "stats");
        if (error)
                goto out_remove_sysfs;

        error = xfs_error_sysfs_init(mp);
        if (error)
                goto out_del_stats;

        error = xfs_errortag_init(mp);
        if (error)
                goto out_remove_error_sysfs;

        error = xfs_uuid_mount(mp);
        if (error)
                goto out_remove_errortag;

        /*
         * Update the preferred write size based on the information from the
         * on-disk superblock.
         */
        mp->m_allocsize_log =
                max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
        mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
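        /*
         * Worked example (illustrative, not part of the original source):
         * with 4096-byte blocks (sb_blocklog == 12) and the common 64k
         * allocsize default (m_allocsize_log == 16), the max_t() is a no-op
         * and m_allocsize_blocks becomes 1U << (16 - 12) == 16 blocks. The
         * max_t() only matters when the block size exceeds the configured
         * allocsize, in which case a single block becomes the preferred unit.
         */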
        /* set the low space thresholds for dynamic preallocation */
        xfs_set_low_space_thresholds(mp);

        /*
         * If enabled, sparse inode chunk alignment is expected to match the
         * cluster size. Full inode chunk alignment must match the chunk size,
         * but that is checked on sb read verification...
         */
        if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
            mp->m_sb.sb_spino_align !=
                        XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
                xfs_warn(mp,
        "Sparse inode block alignment (%u) must match cluster size (%llu).",
                         mp->m_sb.sb_spino_align,
                         XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
                error = -EINVAL;
                goto out_remove_uuid;
        }
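        /*
         * Illustrative numbers (assumed, not from the original source): a v5
         * filesystem with 512-byte inodes scales the 8k base inode cluster
         * buffer to 16k, so with 4096-byte blocks the sb_spino_align expected
         * here would be XFS_B_TO_FSBT(mp, 16384) == 4 blocks.
         */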
        /*
         * Check that the data (and log if separate) is an ok size.
         */
        error = xfs_check_sizes(mp);
        if (error)
                goto out_remove_uuid;

        /*
         * Initialize realtime fields in the mount structure
         */
        error = xfs_rtmount_init(mp);
        if (error) {
                xfs_warn(mp, "RT mount failed");
                goto out_remove_uuid;
        }

        /*
         * Copies the low order bits of the timestamp and the randomly
         * set "sequence" number out of a UUID.
         */
        mp->m_fixedfsid[0] =
                (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
                 get_unaligned_be16(&sbp->sb_uuid.b[4]);
        mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
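        /*
         * Illustrative layout (not part of the original source): for a UUID
         * aabbccdd-eeff-1122-3344-556677889900, b[0..3] hold aabbccdd,
         * b[4..5] hold eeff and b[8..9] hold 3344, so m_fixedfsid ends up as
         * { 0x3344eeff, 0xaabbccdd }. These bytes give a stable per-fs
         * identifier, e.g. for the fsid embedded in XFS file handles.
         */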
        error = xfs_da_mount(mp);
        if (error) {
                xfs_warn(mp, "Failed dir/attr init: %d", error);
                goto out_remove_uuid;
        }

        /*
         * Initialize the precomputed transaction reservations values.
         */
        xfs_trans_init(mp);

        /*
         * Allocate and initialize the per-ag data.
         */
        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
        if (error) {
                xfs_warn(mp, "Failed per-ag init: %d", error);
                goto out_free_dir;
        }

        if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
                xfs_warn(mp, "no log defined");
                error = -EFSCORRUPTED;
                goto out_free_perag;
        }

        /*
         * Log's mount-time initialization. The first part of recovery can
         * place some items on the AIL, to be handled when recovery is
         * finished or cancelled.
         */
        error = xfs_log_mount(mp, mp->m_logdev_targp,
                              XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
                              XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
        if (error) {
                xfs_warn(mp, "log mount failed");
                goto out_fail_wait;
        }

        /* Make sure the summary counts are ok. */
        error = xfs_check_summary_counts(mp);
        if (error)
                goto out_log_dealloc;

        /*
         * Get and sanity-check the root inode.
         * Save the pointer to it in the mount structure.
         */
        error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
                         XFS_ILOCK_EXCL, &rip);
        if (error) {
                xfs_warn(mp,
                        "Failed to read root inode 0x%llx, error %d",
                        sbp->sb_rootino, -error);
                goto out_log_dealloc;
        }

        ASSERT(rip != NULL);

        if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
                error = -EFSCORRUPTED;
                goto out_rele_rip;
        }
        mp->m_rootip = rip;     /* save it */

        xfs_iunlock(rip, XFS_ILOCK_EXCL);

        /*
         * Initialize realtime inode pointers in the mount structure
         */
        error = xfs_rtmount_inodes(mp);
        if (error) {
                /*
                 * Free up the root inode.
                 */
                xfs_warn(mp, "failed to read RT inodes");
                goto out_rele_rip;
        }

        /*
         * If this is a read-only mount defer the superblock updates until
         * the next remount into writeable mode. Otherwise we would never
         * perform the update e.g. for the root filesystem.
         */
        if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
                error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        goto out_rtunmount;
                }
        }

        /*
         * Initialise the XFS quota management subsystem for this mount
         */
        if (XFS_IS_QUOTA_RUNNING(mp)) {
                error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
                if (error)
                        goto out_rtunmount;
        } else {
                ASSERT(!XFS_IS_QUOTA_ON(mp));

                /*
                 * If a file system had quotas running earlier, but decided to
                 * mount without -o uquota/pquota/gquota options, revoke the
                 * quotachecked license.
                 */
                if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
                        xfs_notice(mp, "resetting quota flags");
                        error = xfs_mount_reset_sbqflags(mp);
                        if (error)
                                goto out_rtunmount;
                }
        }

        /*
         * Finish recovering the file system. This part needed to be delayed
         * until after the root and real-time bitmap inodes were consistently
         * read in. Temporarily create per-AG space reservations for metadata
         * btree shape changes because space freeing transactions (for inode
         * inactivation) require the per-AG reservation in lieu of reserving
         * blocks.
         */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error == -ENOSPC)
                xfs_warn(mp,
        "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
        error = xfs_log_mount_finish(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        if (error) {
                xfs_warn(mp, "log mount finish failed");
                goto out_rtunmount;
        }

        /*
         * Now the log is fully replayed, we can transition to full read-only
         * mode for read-only mounts. This will sync all the metadata and clean
         * the log so that the recovery we just performed does not have to be
         * replayed again on the next mount.
         *
         * We use the same quiesce mechanism as the rw->ro remount, as they are
         * semantically identical operations.
         */
        if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
                                                        XFS_MOUNT_RDONLY)
                xfs_quiesce_attr(mp);

        /*
         * Complete the quota initialisation, post-log-replay component.
         */
        if (quotamount) {
                ASSERT(mp->m_qflags == 0);
                mp->m_qflags = quotaflags;

                xfs_qm_mount_quotas(mp);
        }

        /*
         * Now we are mounted, reserve a small amount of unused space for
         * privileged transactions. This is needed so that transaction
         * space required for critical operations can dip into this pool
         * when at ENOSPC. This is needed for operations like create with
         * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
         * are not allowed to use this reserved space.
         *
         * This may drive us straight to ENOSPC on mount, but that implies
         * we were already there on the last unmount. Warn if this occurs.
         */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                resblks = xfs_default_resblks(mp);
                error = xfs_reserve_blocks(mp, &resblks, NULL);
                if (error)
                        xfs_warn(mp,
        "Unable to allocate reserve blocks. Continuing without reserve pool.");

                /* Recover any CoW blocks that never got remapped. */
                error = xfs_reflink_recover_cow(mp);
                if (error) {
                        xfs_err(mp,
        "Error %d recovering leftover CoW allocations.", error);
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        goto out_quota;
                }

                /* Reserve AG blocks for future btree expansion. */
                error = xfs_fs_reserve_ag_blocks(mp);
                if (error && error != -ENOSPC)
                        goto out_agresv;
        }

        return 0;

 out_agresv:
        xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
        xfs_qm_unmount_quotas(mp);
 out_rtunmount:
        xfs_rtunmount_inodes(mp);
 out_rele_rip:
        xfs_irele(rip);
        /* Clean out dquots that might be in memory after quotacheck. */
        xfs_qm_unmount(mp);
        /*
         * Flush all inode reclamation work and flush the log.
         * We have to do this /after/ rtunmount and qm_unmount because those
         * two will have scheduled delayed reclaim for the rt/quota inodes.
         *
         * This is slightly different from the unmountfs call sequence
         * because we could be tearing down a partially set up mount. In
         * particular, if log_mount_finish fails we bail out without calling
         * qm_unmount_quotas and therefore rely on qm_unmount to release the
         * quota inodes.
         */
        xfs_unmount_flush_inodes(mp);
 out_log_dealloc:
        xfs_log_mount_cancel(mp);
 out_fail_wait:
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
                xfs_wait_buftarg(mp->m_logdev_targp);
        xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
        xfs_free_perag(mp);
 out_free_dir:
        xfs_da_unmount(mp);
 out_remove_uuid:
        xfs_uuid_unmount(mp);
 out_remove_errortag:
        xfs_errortag_del(mp);
 out_remove_error_sysfs:
        xfs_error_sysfs_del(mp);
 out_del_stats:
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
        xfs_sysfs_del(&mp->m_kobj);
 out:
        return error;
}
/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
        struct xfs_mount *mp)
{
        uint64_t resblks;
        int error;

        xfs_stop_block_reaping(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        xfs_qm_unmount_quotas(mp);
        xfs_rtunmount_inodes(mp);
        xfs_irele(mp->m_rootip);

        xfs_unmount_flush_inodes(mp);

        xfs_qm_unmount(mp);

        /*
         * Unreserve any blocks we have so that when we unmount we don't
         * account the reserved free space as used. This is really only
         * necessary for lazy superblock counting because it trusts the
         * incore superblock counters to be absolutely correct on clean
         * unmount.
         *
         * We don't bother correcting this elsewhere for lazy superblock
         * counting because on mount of an unclean filesystem we reconstruct
         * the correct counter value and this is irrelevant.
         *
         * For non-lazy counter filesystems, this doesn't matter at all
         * because we only ever apply deltas to the superblock and hence the
         * incore value does not matter....
         */
        resblks = 0;
        error = xfs_reserve_blocks(mp, &resblks, NULL);
        if (error)
                xfs_warn(mp, "Unable to free reserved block pool. "
                                "Freespace may not be correct on next mount.");

        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "Unable to update superblock counters. "
                                "Freespace may not be correct on next mount.");

        xfs_log_unmount(mp);
        xfs_da_unmount(mp);
        xfs_uuid_unmount(mp);

#if defined(DEBUG)
        xfs_errortag_clearall(mp);
#endif
        xfs_free_perag(mp);

        xfs_errortag_del(mp);
        xfs_error_sysfs_del(mp);
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
        xfs_sysfs_del(&mp->m_kobj);
}
/*
 * Determine whether modifications can proceed. The caller specifies the
 * minimum freeze level for which modifications should not be allowed. This
 * allows certain operations to proceed while the freeze sequence is in
 * progress, if necessary.
 */
bool
xfs_fs_writable(
        struct xfs_mount *mp,
        int level)
{
        ASSERT(level > SB_UNFROZEN);
        if ((mp->m_super->s_writers.frozen >= level) ||
            XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
                return false;

        return true;
}
/*
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so we use the
 * transaction allocator that does not block when the transaction subsystem is
 * in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
        if (!xfs_log_writable(mp))
                return 0;

        /*
         * we don't need to do this if we are updating the superblock
         * counters on every modification.
         */
        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
                return 0;

        return xfs_sync_sb(mp, true);
}
/*
 * Deltas for the block count can vary from 1 to very large, but lock
 * contention only occurs on frequent small block count updates such as in the
 * delayed allocation path for buffered writes (page-at-a-time updates). Hence
 * we set a large batch count (1024) to minimise global counter updates except
 * when we get near to ENOSPC and we have to be very accurate with our
 * updates.
 */
#define XFS_FDBLOCKS_BATCH      1024
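/*
 * Worked example (illustrative, not part of the original source): with a
 * batch of 1024 on a 64-CPU machine, each CPU may hold up to +/-1024 blocks
 * of unflushed delta, so the global m_fdblocks value can lag reality by as
 * much as 64 * 1024 = 65536 blocks. That slack is harmless with plenty of
 * free space but fatal near ENOSPC, which is why xfs_mod_fdblocks() below
 * drops to a batch of 1 once the counter falls under 2 * XFS_FDBLOCKS_BATCH.
 */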
int
xfs_mod_fdblocks(
        struct xfs_mount *mp,
        int64_t delta,
        bool rsvd)
{
        int64_t lcounter;
        long long res_used;
        s32 batch;

        if (delta > 0) {
                /*
                 * If the reserve pool is depleted, put blocks back into it
                 * first. Most of the time the pool is full.
                 */
                if (likely(mp->m_resblks == mp->m_resblks_avail)) {
                        percpu_counter_add(&mp->m_fdblocks, delta);
                        return 0;
                }

                spin_lock(&mp->m_sb_lock);
                res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

                if (res_used > delta) {
                        mp->m_resblks_avail += delta;
                } else {
                        delta -= res_used;
                        mp->m_resblks_avail = mp->m_resblks;
                        percpu_counter_add(&mp->m_fdblocks, delta);
                }
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }

        /*
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
        if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
                                     XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
                batch = XFS_FDBLOCKS_BATCH;

        percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
        if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
                                     XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }

        /*
         * lock up the sb for dipping into reserves before releasing the space
         * that took us to ENOSPC.
         */
        spin_lock(&mp->m_sb_lock);
        percpu_counter_add(&mp->m_fdblocks, -delta);
        if (!rsvd)
                goto fdblocks_enospc;

        lcounter = (long long)mp->m_resblks_avail + delta;
        if (lcounter >= 0) {
                mp->m_resblks_avail = lcounter;
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }
        xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
        spin_unlock(&mp->m_sb_lock);
        return -ENOSPC;
}
int
xfs_mod_frextents(
        struct xfs_mount *mp,
        int64_t delta)
{
        int64_t lcounter;
        int ret = 0;

        spin_lock(&mp->m_sb_lock);
        lcounter = mp->m_sb.sb_frextents + delta;
        if (lcounter < 0)
                ret = -ENOSPC;
        else
                mp->m_sb.sb_frextents = lcounter;
        spin_unlock(&mp->m_sb_lock);
        return ret;
}
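/*
 * Note (an aside, not from the original source): unlike m_fdblocks above,
 * the free realtime extent count lives in a plain superblock field guarded
 * by m_sb_lock. Realtime allocations are comparatively rare, so the percpu
 * counter machinery isn't warranted here.
 */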
/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
        struct xfs_mount *mp)
{
        struct xfs_buf *bp = mp->m_sb_bp;

        xfs_buf_lock(bp);
        mp->m_sb_bp = NULL;
        xfs_buf_relse(bp);
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        struct xfs_mount *mp,
        char *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                xfs_notice(mp, "%s required on read-only device.", message);
                xfs_notice(mp, "write access unavailable, cannot proceed.");
                return -EROFS;
        }
        return 0;
}
/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
        struct xfs_mount *mp)
{
        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
                return;

        xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}
/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching). Each change to delayed allocation
 * reservations can easily exceed the default percpu counter batching, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH      (4096)
void
xfs_mod_delalloc(
        struct xfs_mount *mp,
        int64_t delta)
{
        percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
                        XFS_DELALLOC_BATCH);
}
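/*
 * Note (illustrative, not part of the original source): with a batch of
 * 4096, a percpu_counter_read() of m_delalloc_blks can be stale by up to
 * nr_cpu_ids * 4096 blocks; a caller needing an exact figure would have to
 * pay for percpu_counter_sum() instead. As the comment above says, no
 * current caller does.
 */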