// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"
#include "xfs_rtbitmap.h"
#include "scrub/stats.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

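/*
 * Note added for clarity (not part of the original source): the three
 * modes mirror the dax= mount option below: "inode" (the default)
 * defers to per-inode DAX flags, while "always" and "never" force DAX
 * on or off for the whole filesystem. The helper above keeps the two
 * feature bits mutually exclusive.
 */
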
static const struct constant_table dax_param_enums[] = {
	{"inode",		XFS_DAX_INODE },
	{"always",		XFS_DAX_ALWAYS },
	{"never",		XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

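/*
 * Illustrative example (added for clarity, not part of the original
 * source): given the table above, a mount invocation such as
 *
 *	mount -o logbufs=8,logbsize=256k,allocsize=64k,noalign <dev> <mnt>
 *
 * is tokenized by the VFS fs_parser into Opt_logbufs (u32 value 8),
 * Opt_logbsize and Opt_allocsize (strings, parsed with
 * suffix_kstrtoint() below) and the Opt_noalign flag, which
 * xfs_fs_parse_param() then applies to the xfs_mount being assembled.
 */
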
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

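/*
 * Illustrative example (added for clarity, not part of the original
 * source): for a default mount with user quota enforcement enabled,
 * the XFS portion of a /proc/mounts line produced by the function
 * above would look something like:
 *
 *	... xfs rw,attr2,inode64,logbufs=8,logbsize=32k,usrquota 0 0
 *
 * (the leading rw and similar generic flags are printed by the VFS,
 * not by XFS).
 */
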
static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino,
	xfs_agnumber_t		max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

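/*
 * Worked example (added for clarity, not part of the original source):
 * with an inode32 mount on a filesystem large enough that the last
 * possible inode number no longer fits in 32 bits, XFS_OPSTATE_INODE32
 * is set, every AG whose inode numbers would exceed XFS_MAXINUMBER_32
 * gets ALLOWS_INODES cleared, and the first max_metadata AGs (sized
 * from sb_imax_pct) are additionally marked PREFERS_METADATA so file
 * data is steered toward the higher AGs.
 */
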
static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct bdev_handle	**handlep)
{
	int			error = 0;

	*handlep = bdev_open_by_path(name,
		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
		mp->m_super, &fs_holder_ops);
	if (IS_ERR(*handlep)) {
		error = PTR_ERR(*handlep);
		*handlep = NULL;
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_shutdown_devices(
	struct xfs_mount	*mp)
{
	/*
	 * Udev is triggered whenever anyone closes a block device or unmounts
	 * a file system on a block device.
	 * The default udev rules invoke blkid to read the fs super and create
	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
	 * reads through the page cache.
	 *
	 * xfs_db also uses buffered reads to examine metadata.  There is no
	 * coordination between xfs_db and udev, which means that they can run
	 * concurrently.  Note there is no coordination between the kernel and
	 * blkid, either.
	 *
	 * On a system with 64k pages, the page cache can cache the superblock
	 * and the root inode (and hence the root directory) with the same 64k
	 * page.  If udev spawns blkid after the mkfs and the system is busy
	 * enough that it is still running when xfs_db starts up, they'll both
	 * read from the same page in the pagecache.
	 *
	 * The unmount writes updated inode metadata to disk directly.  The XFS
	 * buffer cache does not use the bdev pagecache, so it needs to
	 * invalidate that pagecache on unmount.  If the above scenario occurs,
	 * the pagecache no longer reflects what's on disk, xfs_db reads the
	 * stale metadata, and fails to find /a.  Most of the time this succeeds
	 * because closing a bdev invalidates the page cache, but when processes
	 * race, everyone loses.
	 */
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
	}
	if (mp->m_rtdev_targp) {
		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
	}
	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
}

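/*
 * Note added for clarity (not part of the original source):
 * blkdev_issue_flush() empties the device's volatile write cache and
 * invalidate_bdev() then drops the (potentially stale) bdev pagecache,
 * so tools doing buffered reads after unmount (udev/blkid, xfs_db)
 * repopulate the pagecache with the metadata the unmount just wrote.
 */
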
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;
	struct block_device	*ddev = sb->s_bdev;
	struct bdev_handle	*logdev_handle = NULL, *rtdev_handle = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
		if (error)
			goto out_close_logdev;

		if (rtdev_handle->bdev == ddev ||
		    (logdev_handle &&
		     rtdev_handle->bdev == logdev_handle->bdev)) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev_handle) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev_handle && logdev_handle->bdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
		/* Handle won't be used, drop it */
		if (logdev_handle)
			bdev_release(logdev_handle);
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev_handle)
		bdev_release(rtdev_handle);
 out_close_logdev:
	if (logdev_handle)
		bdev_release(logdev_handle);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

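/*
 * Note added for clarity (not part of the original source): the third
 * argument to alloc_workqueue() is max_active; the xfs-buf and
 * xfs-inodegc queues pass 1 to limit the number of in-flight work
 * items, while the others pass 0 to accept the workqueue core's
 * default concurrency.
 */
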
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the XFS inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
	if (mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_ddev_targp);

	debugfs_remove(mp->m_debugfs);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
	}

	return 0;
}

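/*
 * Worked example (added for clarity, not part of the original source):
 * f_files above is reported as allocated inodes (icount) plus the
 * inodes that could still be created in the free space (fakeinos),
 * clamped to XFS_MAXINUMBER and, when imax_pct is in effect, to
 * maxicount.  E.g. with 1000 free blocks and 8 inodes fitting per
 * block, fakeinos would be 8000.
 */
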
static void
xfs_save_resvblks(struct xfs_mount *mp)
{
	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, 0);
}

static void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, resblks);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim. Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		gc->cpu = cpu;
		gc->mp = mp;
		init_llist_head(&gc->list);
		gc->items = 0;
		gc->error = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	ASSERT(mp->m_inodegc);
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	xchk_mount_stats_free(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_shutdown_devices(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static void
xfs_fs_shutdown(
	struct super_block	*sb)
{
	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
	.shutdown		= xfs_fs_shutdown,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}

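/*
 * Worked example (added for clarity, not part of the original source):
 * suffix_kstrtoint("64k", 10, &res) strips the 'k', parses 64, and
 * returns res = 64 << 10 = 65536.  For the allocsize option this is
 * then reduced to a log2 value: m_allocsize_log = ffs(65536) - 1 = 16.
 */
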
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

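/*
 * Illustrative examples (added for clarity, not part of the original
 * source): a mount with -o sunit=128 but no swidth fails above with
 * "sunit and swidth must be specified together", and -o logbufs=1
 * fails the XLOG_MIN_ICLOGS bound check before the superblock has
 * even been read.
 */
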
static inline struct dentry *
xfs_debugfs_mkdir(
	const char	*name,
	struct dentry	*parent)
{
	struct dentry	*child;

	/* Apparently we're expected to ignore error returns?? */
	child = debugfs_create_dir(name, parent);
	if (IS_ERR(child))
		return NULL;

	return child;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	/*
	 * Copy VFS mount flags from the context now that all parameter parsing
	 * is guaranteed to have been completed by either the old mount API or
	 * the newer fsopen/fsconfig API.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	error = xfs_fs_validate_params(mp);
	if (error)
		return error;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		return error;

	if (xfs_debugfs) {
		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
						  xfs_debugfs);
	} else {
		mp->m_debugfs = NULL;
	}

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_shutdown_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xchk_mount_stats_alloc(mp);
	if (error)
		goto out_free_stats;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_scrub_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* ASCII case insensitivity is undergoing deprecation. */
	if (xfs_has_asciici(mp)) {
#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
		xfs_warn_once(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_scrub_stats:
	xchk_mount_stats_free(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_shutdown_devices:
	xfs_shutdown_devices(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

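/*
 * Note added for clarity (not part of the original source):
 * get_tree_bdev() is the VFS helper that opens the block device named
 * in the mount request, obtains (or finds an existing) superblock for
 * it, and invokes the supplied xfs_fs_fill_super() callback to do the
 * filesystem-specific part of the mount.
 */
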
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount.  We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void
xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

/*
 * WARNING: do not initialise any parameters in this function that depend on
 * mount option parsing having already been performed as this can be called from
 * fsopen() before any parameters have been set.
 */
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

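/*
 * Note added for clarity (not part of the original source): with the
 * new mount API the lifecycle is fsopen() -> xfs_init_fs_context(),
 * fsconfig() -> xfs_fs_parse_param() once per option, and
 * fsconfig(FSCONFIG_CMD_CREATE) -> xfs_fs_get_tree(); the legacy
 * mount(2) path drives the same hooks through the VFS compatibility
 * code.
 */
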
static void
xfs_kill_sb(
	struct super_block	*sb)
{
	kill_block_super(sb);
	xfs_mount_free(XFS_M(sb));
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= xfs_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT |
					 SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					    sizeof(struct xfs_attrd_log_item),
					    0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					    sizeof(struct xfs_attri_log_item),
					    0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					     sizeof(struct xfs_iunlink_item),
					     0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	error = xfs_dahash_test();
	if (error)
		return error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_caches();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_debugfs_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

	error = xchk_global_stats_setup(xfs_debugfs);
	if (error)
		goto out_remove_stats_kobj;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_scrub_stats;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_scrub_stats:
#endif
	xchk_global_stats_teardown();
 out_remove_stats_kobj:
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_debugfs_unregister:
	debugfs_remove(xfs_debugfs);
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xchk_global_stats_teardown();
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	debugfs_remove(xfs_debugfs);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");