// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}
static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};
static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
		if (mp->m_qflags & XFS_UQUOTA_ENFD)
			seq_puts(m, ",usrquota");
		else
			seq_puts(m, ",uqnoenforce");
	}

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
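/*
 * Worked example with hypothetical geometry: sb_dblocks = 1000000,
 * sb_imax_pct = 25 and sb_agblocks = 100000 give icount = 250000 blocks of
 * inode headroom, rounded up to whole AGs: (250000 + 99999) / 100000 = 3.
 * In inode32 mode only the first 3 AGs are then preferred for metadata
 * (max_metadata = 3), and any AG whose last inode number would exceed
 * XFS_MAXINUMBER_32 gets pagi_inodeok = 0.
 */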
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev);
}
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else
		mp->m_logdev_targp = mp->m_ddev_targp;

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
			WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
			0, mp->m_super->s_id);
	if (!mp->m_gc_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_gc_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_gc_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}
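/*
 * Illustrative sequence: if the worker is already flushing on behalf of
 * CPU A when CPUs B and C both call xfs_flush_inodes(), B and C block in
 * the first flush_work() until A's flush completes and then return,
 * rather than queueing two more back-to-back scans - that is the
 * throttling described above.
 */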
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}
#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to wait for IO
	 * completion to occur before we can reclaim the inode. The background
	 * reclaim path handles this more efficiently than we can here, so
	 * simply let background reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}
static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
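/*
 * Worked example (hypothetical counters): with fdblocks = 1000 and
 * m_alloc_set_aside = 32, f_bfree = max(1000 - 32, 0) = 968 blocks.
 * Assuming 16 inodes per block, fakeinos = 968 * 16 = 15488, so f_files
 * reports icount + 15488, clamped by maxicount and floored at sb_icount.
 */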
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_blockgc_stop(mp);
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);
	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);
	return 0;
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}
static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
STATIC int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
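/*
 * Examples (illustrative): "32768" parses unchanged to 32768; "64k" drops
 * the suffix and shifts left by 10, giving 65536; "1m" gives 1048576.
 * Suffixes do not stack: each branch NULs the suffix character, so the
 * later checks see '\0' and leave the shift factor alone.
 */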
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}
/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
				 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
				 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
				 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
		parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}
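/*
 * Worked example for the Opt_allocsize case above (illustrative):
 * "allocsize=64k" passes through suffix_kstrtoint() to size = 65536,
 * and ffs(65536) - 1 = 16, so m_allocsize_log = 16 and the preferred
 * allocation size is 2^16 bytes = 64k.
 */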
static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
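/*
 * Example of the stripe geometry rules above (hypothetical values):
 * "-o sunit=128,swidth=512" is accepted since 512 is a multiple of 128;
 * "-o sunit=128" alone is rejected because sunit and swidth must be
 * specified together; and "-o noalign,sunit=128,swidth=512" is rejected
 * as contradictory.
 */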
static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");

	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_blockgc_free_space(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}
/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount        *new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}
static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");