// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif
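
/*
 * The global mount list above exists so that the CPU-dead callback registered
 * near the bottom of this file via cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, ...)
 * can walk every mounted filesystem and run xfs_inodegc_cpu_dead() for the
 * departing CPU.
 */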

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
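
/*
 * Example: "-o dax=never" maps to XFS_DAX_NEVER through the table above,
 * while a bare "-o dax" is parsed as Opt_dax and treated like dax=always
 * when CONFIG_FS_DAX is enabled (see xfs_fs_parse_param() below).
 */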

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
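
/*
 * Note the two "dax" entries above: the fsparam_flag form matches a bare
 * "dax" option, while the fsparam_enum form matches "dax=<inode|always|never>".
 */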

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static struct proc_xfs_info xfs_info_set[] = {
	/* the few simple ones we can get from the mount struct */
	{ XFS_FEAT_IKEEP,		",ikeep" },
	{ XFS_FEAT_WSYNC,		",wsync" },
	{ XFS_FEAT_NOALIGN,		",noalign" },
	{ XFS_FEAT_SWALLOC,		",swalloc" },
	{ XFS_FEAT_NOUUID,		",nouuid" },
	{ XFS_FEAT_NORECOVERY,		",norecovery" },
	{ XFS_FEAT_ATTR2,		",attr2" },
	{ XFS_FEAT_FILESTREAMS,		",filestreams" },
	{ XFS_FEAT_GRPID,		",grpid" },
	{ XFS_FEAT_DISCARD,		",discard" },
	{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
	{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
	{ XFS_FEAT_DAX_NEVER,		",dax=never" },
	{ 0, NULL }
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
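
/*
 * Illustrative example: with m_allocsize_log == 16 the allocsize option is
 * shown as ",allocsize=64k", since (1 << 16) >> 10 == 64.
 */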

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
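
/*
 * For example: if the last possible inode number in the filesystem exceeds
 * XFS_MAXINUMBER_32 and inode32 was requested, XFS_OPSTATE_INODE32 is set
 * and only the lower AGs keep pagi_inodeok set; otherwise every AG may
 * allocate inodes and the full agcount is returned.
 */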
263 struct xfs_mount *mp,
264 xfs_agnumber_t agcount)
266 xfs_agnumber_t index;
267 xfs_agnumber_t maxagi = 0;
268 xfs_sb_t *sbp = &mp->m_sb;
269 xfs_agnumber_t max_metadata;
274 * Calculate how much should be reserved for inodes to meet
275 * the max inode percentage. Used only for inode32.
277 if (M_IGEO(mp)->maxicount) {
280 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
282 icount += sbp->sb_agblocks - 1;
283 do_div(icount, sbp->sb_agblocks);
284 max_metadata = icount;
286 max_metadata = agcount;
289 /* Get the last possible inode in the filesystem */
290 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
291 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
294 * If user asked for no more than 32-bit inodes, and the fs is
295 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
296 * the allocator to accommodate the request.
298 if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
299 set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
301 clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
303 for (index = 0; index < agcount; index++) {
304 struct xfs_perag *pag;
306 ino = XFS_AGINO_TO_INO(mp, index, agino);
308 pag = xfs_perag_get(mp, index);
310 if (xfs_is_inode32(mp)) {
311 if (ino > XFS_MAXINUMBER_32) {
312 pag->pagi_inodeok = 0;
313 pag->pagf_metadata = 0;
315 pag->pagi_inodeok = 1;
317 if (index < max_metadata)
318 pag->pagf_metadata = 1;
320 pag->pagf_metadata = 0;
323 pag->pagi_inodeok = 1;
324 pag->pagf_metadata = 0;
330 return xfs_is_inode32(mp) ? maxagi : agcount;

static bool
xfs_buftarg_is_dax(
	struct super_block	*sb,
	struct xfs_buftarg	*bt)
{
	return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
			bdev_nr_sectors(bt->bt_bdev));
}
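
/*
 * xfs_buftarg_is_dax() is used at mount time (see xfs_fs_fill_super() below)
 * to verify that "dax=always" is actually supported by the data and realtime
 * devices; if neither supports DAX, the mount falls back to dax=never.
 */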

STATIC int
xfs_blkdev_get(
	struct xfs_mount	*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
406 struct xfs_mount *mp)
408 struct block_device *ddev = mp->m_super->s_bdev;
409 struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
410 struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
411 struct block_device *logdev = NULL, *rtdev = NULL;
415 * Open real time and log devices - order is important.
418 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
421 dax_logdev = fs_dax_get_by_bdev(logdev);
425 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
427 goto out_close_logdev;
429 if (rtdev == ddev || rtdev == logdev) {
431 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
433 goto out_close_rtdev;
435 dax_rtdev = fs_dax_get_by_bdev(rtdev);
439 * Setup xfs_mount buffer target pointers
442 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
443 if (!mp->m_ddev_targp)
444 goto out_close_rtdev;
447 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
448 if (!mp->m_rtdev_targp)
449 goto out_free_ddev_targ;
452 if (logdev && logdev != ddev) {
453 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
454 if (!mp->m_logdev_targp)
455 goto out_free_rtdev_targ;
457 mp->m_logdev_targp = mp->m_ddev_targp;
463 if (mp->m_rtdev_targp)
464 xfs_free_buftarg(mp->m_rtdev_targp);
466 xfs_free_buftarg(mp->m_ddev_targp);
468 xfs_blkdev_put(rtdev);
469 fs_put_dax(dax_rtdev);
471 if (logdev && logdev != ddev) {
472 xfs_blkdev_put(logdev);
473 fs_put_dax(dax_logdev);
476 fs_put_dax(dax_ddev);
481 * Setup xfs_mount buffer target pointers based on superblock
485 struct xfs_mount *mp)
489 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
493 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
494 unsigned int log_sector_size = BBSIZE;
496 if (xfs_has_sector(mp))
497 log_sector_size = mp->m_sb.sb_logsectsize;
498 error = xfs_setsize_buftarg(mp->m_logdev_targp,
503 if (mp->m_rtdev_targp) {
504 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
505 mp->m_sb.sb_sectsize);
514 xfs_init_mount_workqueues(
515 struct xfs_mount *mp)
517 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
518 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
519 1, mp->m_super->s_id);
520 if (!mp->m_buf_workqueue)
523 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
524 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
525 0, mp->m_super->s_id);
526 if (!mp->m_unwritten_workqueue)
527 goto out_destroy_buf;
529 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
530 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
531 0, mp->m_super->s_id);
532 if (!mp->m_reclaim_workqueue)
533 goto out_destroy_unwritten;
535 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
536 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
537 0, mp->m_super->s_id);
538 if (!mp->m_blockgc_wq)
539 goto out_destroy_reclaim;
541 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
542 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
543 1, mp->m_super->s_id);
544 if (!mp->m_inodegc_wq)
545 goto out_destroy_blockgc;
547 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
548 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
549 if (!mp->m_sync_workqueue)
550 goto out_destroy_inodegc;
555 destroy_workqueue(mp->m_inodegc_wq);
557 destroy_workqueue(mp->m_blockgc_wq);
559 destroy_workqueue(mp->m_reclaim_workqueue);
560 out_destroy_unwritten:
561 destroy_workqueue(mp->m_unwritten_workqueue);
563 destroy_workqueue(mp->m_buf_workqueue);

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}
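
/*
 * Each per-mount workqueue created in xfs_init_mount_workqueues() above is
 * named with the superblock id ("xfs-buf/%s", "xfs-conv/%s", ...), so the
 * worker threads belonging to a given mount are easy to identify at runtime.
 */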

STATIC void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}
647 struct xfs_inode *ip = XFS_I(inode);
648 struct xfs_mount *mp = ip->i_mount;
649 struct xfs_trans *tp;
651 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
655 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
656 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
657 * in flags possibly together with I_DIRTY_SYNC.
659 if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
662 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
664 xfs_ilock(ip, XFS_ILOCK_EXCL);
665 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
666 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
667 xfs_trans_commit(tp);
671 * Slab object creation initialisation for the XFS inode.
672 * This covers only the idempotent fields in the XFS inode;
673 * all other fields need to be initialised on allocation
674 * from the slab. This avoids the need to repeatedly initialise
675 * fields in the xfs inode that left in the initialise state
676 * when freeing the inode.
679 xfs_fs_inode_init_once(
682 struct xfs_inode *ip = inode;
684 memset(ip, 0, sizeof(struct xfs_inode));
687 inode_init_once(VFS_I(ip));
690 atomic_set(&ip->i_pincount, 0);
691 spin_lock_init(&ip->i_flags_lock);
693 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
694 "xfsino", ip->i_ino);
698 * We do an unlocked check for XFS_IDONTCACHE here because we are already
699 * serialised against cache hits here via the inode->i_lock and igrab() in
700 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
701 * racing with us, and it avoids needing to grab a spinlock here for every inode
702 * we drop the final reference on.
708 struct xfs_inode *ip = XFS_I(inode);
711 * If this unlinked inode is in the middle of recovery, don't
712 * drop the inode just yet; log recovery will take care of
713 * that. See the comment for this inode flag.
715 if (ip->i_flags & XFS_IRECOVERY) {
716 ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
720 return generic_drop_inode(inode);

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}
734 struct super_block *sb,
737 struct xfs_mount *mp = XFS_M(sb);
740 trace_xfs_fs_sync_fs(mp, __return_address);
743 * Doing anything during the async pass would be counterproductive.
748 error = xfs_log_force(mp, XFS_LOG_SYNC);
754 * The disk must be active because we're syncing.
755 * We schedule log work now (now that the disk is
756 * active) instead of later (when it might not be).
758 flush_delayed_work(&mp->m_log->l_work);
762 * If we are called with page faults frozen out, it means we are about
763 * to freeze the transaction subsystem. Take the opportunity to shut
764 * down inodegc because once SB_FREEZE_FS is set it's too late to
765 * prevent inactivation races with freeze. The fs doesn't get called
766 * again by the freezing process until after SB_FREEZE_FS has been set,
767 * so it's now or never. Same logic applies to speculative allocation
768 * garbage collection.
770 * We don't care if this is a normal syncfs call that does this or
771 * freeze that does this - we can run this multiple times without issue
772 * and we won't race with a restart because a restart can only occur
773 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
775 if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
776 xfs_inodegc_stop(mp);
777 xfs_blockgc_stop(mp);
785 struct dentry *dentry,
786 struct kstatfs *statp)
788 struct xfs_mount *mp = XFS_M(dentry->d_sb);
789 xfs_sb_t *sbp = &mp->m_sb;
790 struct xfs_inode *ip = XFS_I(d_inode(dentry));
791 uint64_t fakeinos, id;
799 * Expedite background inodegc but don't wait. We do not want to block
800 * here waiting hours for a billion extent file to be truncated.
802 xfs_inodegc_push(mp);
804 statp->f_type = XFS_SUPER_MAGIC;
805 statp->f_namelen = MAXNAMELEN - 1;
807 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
808 statp->f_fsid = u64_to_fsid(id);
810 icount = percpu_counter_sum(&mp->m_icount);
811 ifree = percpu_counter_sum(&mp->m_ifree);
812 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
814 spin_lock(&mp->m_sb_lock);
815 statp->f_bsize = sbp->sb_blocksize;
816 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
817 statp->f_blocks = sbp->sb_dblocks - lsize;
818 spin_unlock(&mp->m_sb_lock);
820 /* make sure statp->f_bfree does not underflow */
821 statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
822 statp->f_bavail = statp->f_bfree;
824 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
825 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
826 if (M_IGEO(mp)->maxicount)
827 statp->f_files = min_t(typeof(statp->f_files),
829 M_IGEO(mp)->maxicount);
831 /* If sb_icount overshot maxicount, report actual allocation */
832 statp->f_files = max_t(typeof(statp->f_files),
836 /* make sure statp->f_ffree does not underflow */
837 ffree = statp->f_files - (icount - ifree);
838 statp->f_ffree = max_t(int64_t, ffree, 0);
841 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
842 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
843 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
844 xfs_qm_statvfs(ip, statp);
846 if (XFS_IS_REALTIME_MOUNT(mp) &&
847 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
848 statp->f_blocks = sbp->sb_rblocks;
849 statp->f_bavail = statp->f_bfree =
850 sbp->sb_frextents * sbp->sb_rextsize;

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
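
/*
 * These two helpers pair up: xfs_save_resvblks() releases the reserve block
 * pool before a freeze or read-only remount (see xfs_fs_freeze() and
 * xfs_remount_ro() below), and xfs_restore_resvblks() refills it again on
 * thaw or read-write remount.
 */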

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
887 struct super_block *sb)
889 struct xfs_mount *mp = XFS_M(sb);
894 * The filesystem is now frozen far enough that memory reclaim
895 * cannot safely operate on the filesystem. Hence we need to
896 * set a GFP_NOFS context here to avoid recursion deadlocks.
898 flags = memalloc_nofs_save();
899 xfs_save_resvblks(mp);
900 ret = xfs_log_quiesce(mp);
901 memalloc_nofs_restore(flags);
904 * For read-write filesystems, we need to restart the inodegc on error
905 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
906 * going to be run to restart it now. We are at SB_FREEZE_FS level
907 * here, so we can restart safely without racing with a stop in
910 if (ret && !xfs_is_readonly(mp)) {
911 xfs_blockgc_start(mp);
912 xfs_inodegc_start(mp);
920 struct super_block *sb)
922 struct xfs_mount *mp = XFS_M(sb);
924 xfs_restore_resvblks(mp);
925 xfs_log_work_queue(mp);
928 * Don't reactivate the inodegc worker on a readonly filesystem because
929 * inodes are sent directly to reclaim. Don't reactivate the blockgc
930 * worker because there are no speculative preallocations on a readonly
933 if (!xfs_is_readonly(mp)) {
934 xfs_blockgc_start(mp);
935 xfs_inodegc_start(mp);
942 * This function fills in xfs_mount_t fields based on mount args.
943 * Note: the superblock _has_ now been read in.
947 struct xfs_mount *mp)
949 /* Fail a mount where the logbuf is smaller than the log stripe */
950 if (xfs_has_logv2(mp)) {
951 if (mp->m_logbsize <= 0 &&
952 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
953 mp->m_logbsize = mp->m_sb.sb_logsunit;
954 } else if (mp->m_logbsize > 0 &&
955 mp->m_logbsize < mp->m_sb.sb_logsunit) {
957 "logbuf size must be greater than or equal to log stripe size");
961 /* Fail a mount if the logbuf is larger than 32K */
962 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
964 "logbuf size for version 1 logs must be 16K or 32K");
970 * V5 filesystems always use attr2 format for attributes.
972 if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
973 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
974 "attr2 is always enabled for V5 filesystems.");
979 * prohibit r/w mounts of read-only filesystems
981 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
983 "cannot mount a read-only filesystem as read-write");
987 if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
988 (mp->m_qflags & XFS_PQUOTA_ACCT) &&
989 !xfs_has_pquotino(mp)) {
991 "Super block does not support project and group quota together");
999 xfs_init_percpu_counters(
1000 struct xfs_mount *mp)
1004 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1008 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1012 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1016 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1023 percpu_counter_destroy(&mp->m_fdblocks);
1025 percpu_counter_destroy(&mp->m_ifree);
1027 percpu_counter_destroy(&mp->m_icount);
1032 xfs_reinit_percpu_counters(
1033 struct xfs_mount *mp)
1035 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1036 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1037 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1041 xfs_destroy_percpu_counters(
1042 struct xfs_mount *mp)
1044 percpu_counter_destroy(&mp->m_icount);
1045 percpu_counter_destroy(&mp->m_ifree);
1046 percpu_counter_destroy(&mp->m_fdblocks);
1047 ASSERT(xfs_is_shutdown(mp) ||
1048 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1049 percpu_counter_destroy(&mp->m_delalloc_blks);
1053 xfs_inodegc_init_percpu(
1054 struct xfs_mount *mp)
1056 struct xfs_inodegc *gc;
1059 mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1063 for_each_possible_cpu(cpu) {
1064 gc = per_cpu_ptr(mp->m_inodegc, cpu);
1065 #if defined(DEBUG) || defined(XFS_WARN)
1068 init_llist_head(&gc->list);
1070 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1076 xfs_inodegc_free_percpu(
1077 struct xfs_mount *mp)
1081 free_percpu(mp->m_inodegc);
1086 struct super_block *sb)
1088 struct xfs_mount *mp = XFS_M(sb);
1090 /* if ->fill_super failed, we have no mount to tear down */
1094 xfs_notice(mp, "Unmounting Filesystem");
1095 xfs_filestream_unmount(mp);
1099 free_percpu(mp->m_stats.xs_stats);
1100 xfs_mount_list_del(mp);
1101 xfs_inodegc_free_percpu(mp);
1102 xfs_destroy_percpu_counters(mp);
1103 xfs_destroy_mount_workqueues(mp);
1104 xfs_close_devices(mp);
1106 sb->s_fs_info = NULL;

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
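
/*
 * The nr_cached_objects/free_cached_objects callbacks above hook the VFS
 * per-superblock shrinker into XFS inode reclaim, so memory pressure can
 * reclaim inodes that have already been marked reclaimable by
 * xfs_fs_destroy_inode().
 */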

static int
suffix_kstrtoint(const char *s, unsigned int base, int *res)
{
	int	last, shift_left_factor = 0, _res;
	char	*value;
	int	ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);

	*res = _res << shift_left_factor;
	return ret;
}
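
/*
 * Example: a "logbsize=32k" mount option parses as 32 with a shift of 10,
 * so *res ends up as 32 << 10 == 32768 bytes.
 */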

static void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
1216 parsing_mp->m_logbufs = result.uint_32;
1219 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1223 kfree(parsing_mp->m_logname);
1224 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1225 if (!parsing_mp->m_logname)
1229 kfree(parsing_mp->m_rtname);
1230 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1231 if (!parsing_mp->m_rtname)
1235 if (suffix_kstrtoint(param->string, 10, &size))
1237 parsing_mp->m_allocsize_log = ffs(size) - 1;
1238 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1242 parsing_mp->m_features |= XFS_FEAT_GRPID;
1245 case Opt_sysvgroups:
1246 parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1249 parsing_mp->m_features |= XFS_FEAT_WSYNC;
1251 case Opt_norecovery:
1252 parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1255 parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1258 parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1261 parsing_mp->m_dalign = result.uint_32;
1264 parsing_mp->m_swidth = result.uint_32;
1267 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1270 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1273 parsing_mp->m_features |= XFS_FEAT_NOUUID;
1276 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1279 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1281 case Opt_filestreams:
1282 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1285 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1286 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1291 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1293 case Opt_qnoenforce:
1294 case Opt_uqnoenforce:
1295 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1296 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1300 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1302 case Opt_pqnoenforce:
1303 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1304 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1308 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1310 case Opt_gqnoenforce:
1311 parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1312 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1315 parsing_mp->m_features |= XFS_FEAT_DISCARD;
1318 parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1320 #ifdef CONFIG_FS_DAX
1322 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1325 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1328 /* Following mount options will be removed in September 2025 */
1330 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1331 parsing_mp->m_features |= XFS_FEAT_IKEEP;
1334 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1335 parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1338 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1339 parsing_mp->m_features |= XFS_FEAT_ATTR2;
1342 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1343 parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1346 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1354 xfs_fs_validate_params(
1355 struct xfs_mount *mp)
1357 /* No recovery flag requires a read-only mount */
1358 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1359 xfs_warn(mp, "no-recovery mounts must be read-only.");
1364 * We have not read the superblock at this point, so only the attr2
1365 * mount option can set the attr2 feature by this stage.
1367 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1368 xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1373 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1375 "sunit and swidth options incompatible with the noalign option");
1379 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1380 xfs_warn(mp, "quota support not available in this kernel.");
1384 if ((mp->m_dalign && !mp->m_swidth) ||
1385 (!mp->m_dalign && mp->m_swidth)) {
1386 xfs_warn(mp, "sunit and swidth must be specified together");
1390 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1392 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1393 mp->m_swidth, mp->m_dalign);
1397 if (mp->m_logbufs != -1 &&
1398 mp->m_logbufs != 0 &&
1399 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1400 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1401 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1402 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1406 if (mp->m_logbsize != -1 &&
1407 mp->m_logbsize != 0 &&
1408 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1409 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1410 !is_power_of_2(mp->m_logbsize))) {
1412 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1417 if (xfs_has_allocsize(mp) &&
1418 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1419 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1420 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1421 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1430 struct super_block *sb,
1431 struct fs_context *fc)
1433 struct xfs_mount *mp = sb->s_fs_info;
1435 int flags = 0, error;
1439 error = xfs_fs_validate_params(mp);
1441 goto out_free_names;
1443 sb_min_blocksize(sb, BBSIZE);
1444 sb->s_xattr = xfs_xattr_handlers;
1445 sb->s_export_op = &xfs_export_operations;
1446 #ifdef CONFIG_XFS_QUOTA
1447 sb->s_qcop = &xfs_quotactl_operations;
1448 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1450 sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
1457 if (xfs_globals.mount_delay) {
1458 xfs_notice(mp, "Delaying mount for %d seconds.",
1459 xfs_globals.mount_delay);
1460 msleep(xfs_globals.mount_delay * 1000);
1463 if (fc->sb_flags & SB_SILENT)
1464 flags |= XFS_MFSI_QUIET;
1466 error = xfs_open_devices(mp);
1468 goto out_free_names;
1470 error = xfs_init_mount_workqueues(mp);
1472 goto out_close_devices;
1474 error = xfs_init_percpu_counters(mp);
1476 goto out_destroy_workqueues;
1478 error = xfs_inodegc_init_percpu(mp);
1480 goto out_destroy_counters;
1483 * All percpu data structures requiring cleanup when a cpu goes offline
1484 * must be allocated before adding this @mp to the cpu-dead handler's
1487 xfs_mount_list_add(mp);
1489 /* Allocate stats memory before we do operations that might use it */
1490 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1491 if (!mp->m_stats.xs_stats) {
1493 goto out_destroy_inodegc;
1496 error = xfs_readsb(mp, flags);
1498 goto out_free_stats;
1500 error = xfs_finish_flags(mp);
1504 error = xfs_setup_devices(mp);
1508 /* V4 support is undergoing deprecation. */
1509 if (!xfs_has_crc(mp)) {
1510 #ifdef CONFIG_XFS_SUPPORT_V4
1512 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1515 "Deprecated V4 format (crc=0) not supported by kernel.");
1521 /* Filesystem claims it needs repair, so refuse the mount. */
1522 if (xfs_has_needsrepair(mp)) {
1523 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1524 error = -EFSCORRUPTED;
1529 * Don't touch the filesystem if a user tool thinks it owns the primary
1530 * superblock. mkfs doesn't clear the flag from secondary supers, so
1531 * we don't check them at all.
1533 if (mp->m_sb.sb_inprogress) {
1534 xfs_warn(mp, "Offline file system operation in progress!");
1535 error = -EFSCORRUPTED;
1540 * Until this is fixed only page-sized or smaller data blocks work.
1542 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1544 "File system with blocksize %d bytes. "
1545 "Only pagesize (%ld) or less will currently work.",
1546 mp->m_sb.sb_blocksize, PAGE_SIZE);
1551 /* Ensure this filesystem fits in the page cache limits */
1552 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1553 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1555 "file system too large to be mounted on this system.");
1561 * XFS block mappings use 54 bits to store the logical block offset.
1562 * This should suffice to handle the maximum file size that the VFS
1563 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1564 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1565 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1566 * to check this assertion.
1568 * Avoid integer overflow by comparing the maximum bmbt offset to the
1569 * maximum pagecache offset in units of fs blocks.
1571 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1573 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1574 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1580 error = xfs_filestream_mount(mp);
1585 * we must configure the block size in the superblock before we run the
1586 * full mount process as the mount process can lookup and cache inodes.
1588 sb->s_magic = XFS_SUPER_MAGIC;
1589 sb->s_blocksize = mp->m_sb.sb_blocksize;
1590 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1591 sb->s_maxbytes = MAX_LFS_FILESIZE;
1592 sb->s_max_links = XFS_MAXLINK;
1593 sb->s_time_gran = 1;
1594 if (xfs_has_bigtime(mp)) {
1595 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1596 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1598 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1599 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1601 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1602 sb->s_iflags |= SB_I_CGROUPWB;
1604 set_posix_acl_flag(sb);
1606 /* version 5 superblocks support inode version counters. */
1607 if (xfs_has_crc(mp))
1608 sb->s_flags |= SB_I_VERSION;
1610 if (xfs_has_dax_always(mp)) {
1611 bool rtdev_is_dax = false, datadev_is_dax;
1614 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1616 datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
1617 if (mp->m_rtdev_targp)
1618 rtdev_is_dax = xfs_buftarg_is_dax(sb,
1620 if (!rtdev_is_dax && !datadev_is_dax) {
1622 "DAX unsupported by block device. Turning off DAX.");
1623 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1625 if (xfs_has_reflink(mp)) {
1627 "DAX and reflink cannot be used together!");
1629 goto out_filestream_unmount;
1633 if (xfs_has_discard(mp)) {
1634 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1636 if (!blk_queue_discard(q)) {
1637 xfs_warn(mp, "mounting with \"discard\" option, but "
1638 "the device does not support discard");
1639 mp->m_features &= ~XFS_FEAT_DISCARD;
1643 if (xfs_has_reflink(mp)) {
1644 if (mp->m_sb.sb_rblocks) {
1646 "reflink not compatible with realtime device!");
1648 goto out_filestream_unmount;
1651 if (xfs_globals.always_cow) {
1652 xfs_info(mp, "using DEBUG-only always_cow mode.");
1653 mp->m_always_cow = true;
1657 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1659 "reverse mapping btree not compatible with realtime device!");
1661 goto out_filestream_unmount;
1664 error = xfs_mountfs(mp);
1666 goto out_filestream_unmount;
1668 root = igrab(VFS_I(mp->m_rootip));
1673 sb->s_root = d_make_root(root);
1681 out_filestream_unmount:
1682 xfs_filestream_unmount(mp);
1686 free_percpu(mp->m_stats.xs_stats);
1687 out_destroy_inodegc:
1688 xfs_mount_list_del(mp);
1689 xfs_inodegc_free_percpu(mp);
1690 out_destroy_counters:
1691 xfs_destroy_percpu_counters(mp);
1692 out_destroy_workqueues:
1693 xfs_destroy_mount_workqueues(mp);
1695 xfs_close_devices(mp);
1697 sb->s_fs_info = NULL;
1702 xfs_filestream_unmount(mp);

STATIC int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
1716 struct xfs_mount *mp)
1718 struct xfs_sb *sbp = &mp->m_sb;
1721 if (xfs_has_norecovery(mp)) {
1723 "ro->rw transition prohibited on norecovery mount");
1727 if (xfs_sb_is_v5(sbp) &&
1728 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1730 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1731 (sbp->sb_features_ro_compat &
1732 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1736 clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1739 * If this is the first remount to writeable state we might have some
1740 * superblock changes to update.
1742 if (mp->m_update_sb) {
1743 error = xfs_sync_sb(mp, false);
1745 xfs_warn(mp, "failed to write sb changes");
1748 mp->m_update_sb = false;
1752 * Fill out the reserve pool if it is empty. Use the stashed value if
1753 * it is non-zero, otherwise go with the default.
1755 xfs_restore_resvblks(mp);
1756 xfs_log_work_queue(mp);
1757 xfs_blockgc_start(mp);
1759 /* Create the per-AG metadata reservation pool .*/
1760 error = xfs_fs_reserve_ag_blocks(mp);
1761 if (error && error != -ENOSPC)
1764 /* Re-enable the background inode inactivation worker. */
1765 xfs_inodegc_start(mp);
1772 struct xfs_mount *mp)
1774 struct xfs_icwalk icw = {
1775 .icw_flags = XFS_ICWALK_FLAG_SYNC,
1779 /* Flush all the dirty data to disk. */
1780 error = sync_filesystem(mp->m_super);
1785 * Cancel background eofb scanning so it cannot race with the final
1786 * log force+buftarg wait and deadlock the remount.
1788 xfs_blockgc_stop(mp);
1791 * Clear out all remaining COW staging extents and speculative post-EOF
1792 * preallocations so that we don't leave inodes requiring inactivation
1793 * cleanups during reclaim on a read-only mount. We must process every
1794 * cached inode, so this requires a synchronous cache scan.
1796 error = xfs_blockgc_free_space(mp, &icw);
1798 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1803 * Stop the inodegc background worker. xfs_fs_reconfigure already
1804 * flushed all pending inodegc work when it sync'd the filesystem.
1805 * The VFS holds s_umount, so we know that inodes cannot enter
1806 * xfs_fs_destroy_inode during a remount operation. In readonly mode
1807 * we send inodes straight to reclaim, so no inodes will be queued.
1809 xfs_inodegc_stop(mp);
1811 /* Free the per-AG metadata reservation pool. */
1812 error = xfs_fs_unreserve_ag_blocks(mp);
1814 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);
1828 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
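
/*
 * For example, a "mount -o remount,inode64" of an inode32 filesystem clears
 * XFS_FEAT_SMALL_INUMS and recomputes m_maxagi via xfs_set_inode_alloc(), as
 * handled in xfs_fs_reconfigure() below.
 */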
1847 struct fs_context *fc)
1849 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1850 struct xfs_mount *new_mp = fc->s_fs_info;
1851 int flags = fc->sb_flags;
1854 /* version 5 superblocks always support version counters. */
1855 if (xfs_has_crc(mp))
1856 fc->sb_flags |= SB_I_VERSION;
1858 error = xfs_fs_validate_params(new_mp);
1862 /* inode32 -> inode64 */
1863 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1864 mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1865 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1868 /* inode64 -> inode32 */
1869 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1870 mp->m_features |= XFS_FEAT_SMALL_INUMS;
1871 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1875 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1876 error = xfs_remount_rw(mp);
1882 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1883 error = xfs_remount_ro(mp);
1891 static void xfs_fs_free(
1892 struct fs_context *fc)
1894 struct xfs_mount *mp = fc->s_fs_info;
1897 * mp is stored in the fs_context when it is initialized.
1898 * mp is transferred to the superblock on a successful mount,
1899 * but if an error occurs before the transfer we have to free
1906 static const struct fs_context_operations xfs_context_ops = {
1907 .parse_param = xfs_fs_parse_param,
1908 .get_tree = xfs_fs_get_tree,
1909 .reconfigure = xfs_fs_reconfigure,
1910 .free = xfs_fs_free,
1913 static int xfs_init_fs_context(
1914 struct fs_context *fc)
1916 struct xfs_mount *mp;
1918 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1922 spin_lock_init(&mp->m_sb_lock);
1923 spin_lock_init(&mp->m_agirotor_lock);
1924 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1925 spin_lock_init(&mp->m_perag_lock);
1926 mutex_init(&mp->m_growlock);
1927 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1928 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1929 mp->m_kobj.kobject.kset = xfs_kset;
1931 * We don't create the finobt per-ag space reservation until after log
1932 * recovery, so we must set this to true so that an ifree transaction
1933 * started during log recovery will not depend on space reservations
1934 * for finobt expansion.
1936 mp->m_finobt_nores = true;
1939 * These can be overridden by the mount option parsing.
1942 mp->m_logbsize = -1;
1943 mp->m_allocsize_log = 16; /* 64k */
1946 * Copy binary VFS mount flags we are interested in.
1948 if (fc->sb_flags & SB_RDONLY)
1949 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1950 if (fc->sb_flags & SB_DIRSYNC)
1951 mp->m_features |= XFS_FEAT_DIRSYNC;
1952 if (fc->sb_flags & SB_SYNCHRONOUS)
1953 mp->m_features |= XFS_FEAT_WSYNC;
1956 fc->ops = &xfs_context_ops;

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
1972 xfs_init_zones(void)
1974 xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1975 sizeof(struct xlog_ticket),
1977 if (!xfs_log_ticket_zone)
1980 xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1981 sizeof(struct xfs_extent_free_item),
1983 if (!xfs_bmap_free_item_zone)
1984 goto out_destroy_log_ticket_zone;
1986 xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1987 sizeof(struct xfs_btree_cur),
1989 if (!xfs_btree_cur_zone)
1990 goto out_destroy_bmap_free_item_zone;
1992 xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1993 sizeof(struct xfs_da_state),
1995 if (!xfs_da_state_zone)
1996 goto out_destroy_btree_cur_zone;
1998 xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1999 sizeof(struct xfs_ifork),
2001 if (!xfs_ifork_zone)
2002 goto out_destroy_da_state_zone;
2004 xfs_trans_zone = kmem_cache_create("xfs_trans",
2005 sizeof(struct xfs_trans),
2007 if (!xfs_trans_zone)
2008 goto out_destroy_ifork_zone;
2012 * The size of the zone allocated buf log item is the maximum
2013 * size possible under XFS. This wastes a little bit of memory,
2014 * but it is much faster.
2016 xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
2017 sizeof(struct xfs_buf_log_item),
2019 if (!xfs_buf_item_zone)
2020 goto out_destroy_trans_zone;
2022 xfs_efd_zone = kmem_cache_create("xfs_efd_item",
2023 (sizeof(struct xfs_efd_log_item) +
2024 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
2025 sizeof(struct xfs_extent)),
2028 goto out_destroy_buf_item_zone;
2030 xfs_efi_zone = kmem_cache_create("xfs_efi_item",
2031 (sizeof(struct xfs_efi_log_item) +
2032 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
2033 sizeof(struct xfs_extent)),
2036 goto out_destroy_efd_zone;
2038 xfs_inode_zone = kmem_cache_create("xfs_inode",
2039 sizeof(struct xfs_inode), 0,
2040 (SLAB_HWCACHE_ALIGN |
2041 SLAB_RECLAIM_ACCOUNT |
2042 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2043 xfs_fs_inode_init_once);
2044 if (!xfs_inode_zone)
2045 goto out_destroy_efi_zone;
2047 xfs_ili_zone = kmem_cache_create("xfs_ili",
2048 sizeof(struct xfs_inode_log_item), 0,
2049 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2052 goto out_destroy_inode_zone;
2054 xfs_icreate_zone = kmem_cache_create("xfs_icr",
2055 sizeof(struct xfs_icreate_item),
2057 if (!xfs_icreate_zone)
2058 goto out_destroy_ili_zone;
2060 xfs_rud_zone = kmem_cache_create("xfs_rud_item",
2061 sizeof(struct xfs_rud_log_item),
2064 goto out_destroy_icreate_zone;
2066 xfs_rui_zone = kmem_cache_create("xfs_rui_item",
2067 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2070 goto out_destroy_rud_zone;
2072 xfs_cud_zone = kmem_cache_create("xfs_cud_item",
2073 sizeof(struct xfs_cud_log_item),
2076 goto out_destroy_rui_zone;
2078 xfs_cui_zone = kmem_cache_create("xfs_cui_item",
2079 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2082 goto out_destroy_cud_zone;
2084 xfs_bud_zone = kmem_cache_create("xfs_bud_item",
2085 sizeof(struct xfs_bud_log_item),
2088 goto out_destroy_cui_zone;
2090 xfs_bui_zone = kmem_cache_create("xfs_bui_item",
2091 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2094 goto out_destroy_bud_zone;
2098 out_destroy_bud_zone:
2099 kmem_cache_destroy(xfs_bud_zone);
2100 out_destroy_cui_zone:
2101 kmem_cache_destroy(xfs_cui_zone);
2102 out_destroy_cud_zone:
2103 kmem_cache_destroy(xfs_cud_zone);
2104 out_destroy_rui_zone:
2105 kmem_cache_destroy(xfs_rui_zone);
2106 out_destroy_rud_zone:
2107 kmem_cache_destroy(xfs_rud_zone);
2108 out_destroy_icreate_zone:
2109 kmem_cache_destroy(xfs_icreate_zone);
2110 out_destroy_ili_zone:
2111 kmem_cache_destroy(xfs_ili_zone);
2112 out_destroy_inode_zone:
2113 kmem_cache_destroy(xfs_inode_zone);
2114 out_destroy_efi_zone:
2115 kmem_cache_destroy(xfs_efi_zone);
2116 out_destroy_efd_zone:
2117 kmem_cache_destroy(xfs_efd_zone);
2118 out_destroy_buf_item_zone:
2119 kmem_cache_destroy(xfs_buf_item_zone);
2120 out_destroy_trans_zone:
2121 kmem_cache_destroy(xfs_trans_zone);
2122 out_destroy_ifork_zone:
2123 kmem_cache_destroy(xfs_ifork_zone);
2124 out_destroy_da_state_zone:
2125 kmem_cache_destroy(xfs_da_state_zone);
2126 out_destroy_btree_cur_zone:
2127 kmem_cache_destroy(xfs_btree_cur_zone);
2128 out_destroy_bmap_free_item_zone:
2129 kmem_cache_destroy(xfs_bmap_free_item_zone);
2130 out_destroy_log_ticket_zone:
2131 kmem_cache_destroy(xfs_log_ticket_zone);

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}
2165 xfs_init_workqueues(void)
2168 * The allocation workqueue can be used in memory reclaim situations
2169 * (writepage path), and parallelism is only limited by the number of
2170 * AGs in all the filesystems mounted. Hence use the default large
2171 * max_active value for this workqueue.
2173 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2174 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2178 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2180 if (!xfs_discard_wq)
2181 goto out_free_alloc_wq;
2185 destroy_workqueue(xfs_alloc_wq);
2190 xfs_destroy_workqueues(void)
2192 destroy_workqueue(xfs_discard_wq);
2193 destroy_workqueue(xfs_alloc_wq);
2196 #ifdef CONFIG_HOTPLUG_CPU
2201 struct xfs_mount *mp, *n;
2203 spin_lock(&xfs_mount_list_lock);
2204 list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2205 spin_unlock(&xfs_mount_list_lock);
2206 xfs_inodegc_cpu_dead(mp, cpu);
2207 spin_lock(&xfs_mount_list_lock);
2209 spin_unlock(&xfs_mount_list_lock);
2214 xfs_cpu_hotplug_init(void)
2218 error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2222 "Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2228 xfs_cpu_hotplug_destroy(void)
2230 cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2233 #else /* !CONFIG_HOTPLUG_CPU */
2234 static inline int xfs_cpu_hotplug_init(void) { return 0; }
2235 static inline void xfs_cpu_hotplug_destroy(void) {}
2243 xfs_check_ondisk_structs();
2245 printk(KERN_INFO XFS_VERSION_STRING " with "
2246 XFS_BUILD_OPTIONS " enabled\n");
2250 error = xfs_cpu_hotplug_init();
2254 error = xfs_init_zones();
2256 goto out_destroy_hp;
2258 error = xfs_init_workqueues();
2260 goto out_destroy_zones;
2262 error = xfs_mru_cache_init();
2264 goto out_destroy_wq;
2266 error = xfs_buf_init();
2268 goto out_mru_cache_uninit;
2270 error = xfs_init_procfs();
2272 goto out_buf_terminate;
2274 error = xfs_sysctl_register();
2276 goto out_cleanup_procfs;
2278 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2281 goto out_sysctl_unregister;
2284 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2286 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2287 if (!xfsstats.xs_stats) {
2289 goto out_kset_unregister;
2292 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2295 goto out_free_stats;
2298 xfs_dbg_kobj.kobject.kset = xfs_kset;
2299 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2301 goto out_remove_stats_kobj;
2304 error = xfs_qm_init();
2306 goto out_remove_dbg_kobj;
2308 error = register_filesystem(&xfs_fs_type);
2315 out_remove_dbg_kobj:
2317 xfs_sysfs_del(&xfs_dbg_kobj);
2318 out_remove_stats_kobj:
2320 xfs_sysfs_del(&xfsstats.xs_kobj);
2322 free_percpu(xfsstats.xs_stats);
2323 out_kset_unregister:
2324 kset_unregister(xfs_kset);
2325 out_sysctl_unregister:
2326 xfs_sysctl_unregister();
2328 xfs_cleanup_procfs();
2330 xfs_buf_terminate();
2331 out_mru_cache_uninit:
2332 xfs_mru_cache_uninit();
2334 xfs_destroy_workqueues();
2336 xfs_destroy_zones();
2338 xfs_cpu_hotplug_destroy();
2347 unregister_filesystem(&xfs_fs_type);
2349 xfs_sysfs_del(&xfs_dbg_kobj);
2351 xfs_sysfs_del(&xfsstats.xs_kobj);
2352 free_percpu(xfsstats.xs_stats);
2353 kset_unregister(xfs_kset);
2354 xfs_sysctl_unregister();
2355 xfs_cleanup_procfs();
2356 xfs_buf_terminate();
2357 xfs_mru_cache_uninit();
2358 xfs_destroy_workqueues();
2359 xfs_destroy_zones();
2360 xfs_uuid_table_free();
2361 xfs_cpu_hotplug_destroy();
2364 module_init(init_xfs_fs);
2365 module_exit(exit_xfs_fs);
2367 MODULE_AUTHOR("Silicon Graphics, Inc.");
2368 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2369 MODULE_LICENSE("GPL");