1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * super.c contains code to handle: - mount structures
8 * - super-block lists
9 * - filesystem drivers list
10 * - mount system call
11 * - umount system call
12 * - ustat system call
14 * GK 2/5/95 - Changed to support mounting the root fs via NFS
16 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
17 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
18 * Added options to /proc/mounts:
19 * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
20 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
21 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
24 #include <linux/export.h>
25 #include <linux/slab.h>
26 #include <linux/blkdev.h>
27 #include <linux/mount.h>
28 #include <linux/security.h>
29 #include <linux/writeback.h> /* for the emergency remount stuff */
30 #include <linux/idr.h>
31 #include <linux/mutex.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/cleancache.h>
35 #include <linux/fscrypt.h>
36 #include <linux/fsnotify.h>
37 #include <linux/lockdep.h>
38 #include <linux/user_namespace.h>
39 #include <linux/fs_context.h>
40 #include <uapi/linux/mount.h>
43 static int thaw_super_locked(struct super_block *sb);
45 static LIST_HEAD(super_blocks);
46 static DEFINE_SPINLOCK(sb_lock);
48 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
55 * One thing we have to be careful of with a per-sb shrinker is that we don't
56 * drop the last active reference to the superblock from within the shrinker.
57 * If that happens we could trigger unregistering the shrinker from within the
58 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
59 * take a passive reference to the superblock to prevent this from occurring.
61 static unsigned long super_cache_scan(struct shrinker *shrink,
62 struct shrink_control *sc)
64 struct super_block *sb;
71 sb = container_of(shrink, struct super_block, s_shrink);
74 * Deadlock avoidance. We may hold various FS locks, and we don't want
75 * to recurse into the FS that called us in clear_inode() and friends..
77 if (!(sc->gfp_mask & __GFP_FS))
80 if (!trylock_super(sb))
83 if (sb->s_op->nr_cached_objects)
84 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
86 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
87 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
88 total_objects = dentries + inodes + fs_objects + 1;
92 /* proportion the scan between the caches */
93 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
94 inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
95 fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
98 * prune the dcache first as the icache is pinned by it, then
99 * prune the icache, followed by the filesystem specific caches
101 * Ensure that we always scan at least one object - memcg kmem
102 * accounting uses this to fully empty the caches.
104 sc->nr_to_scan = dentries + 1;
105 freed = prune_dcache_sb(sb, sc);
106 sc->nr_to_scan = inodes + 1;
107 freed += prune_icache_sb(sb, sc);
110 sc->nr_to_scan = fs_objects + 1;
111 freed += sb->s_op->free_cached_objects(sb, sc);
114 up_read(&sb->s_umount);
118 static unsigned long super_cache_count(struct shrinker *shrink,
119 struct shrink_control *sc)
121 struct super_block *sb;
122 long total_objects = 0;
124 sb = container_of(shrink, struct super_block, s_shrink);
127 * We don't call trylock_super() here as it is a scalability bottleneck,
128 * so we're exposed to partial setup state. The shrinker rwsem does not
129 * protect filesystem operations backing list_lru_shrink_count() or
130 * s_op->nr_cached_objects(). Counts can change between
131 * super_cache_count and super_cache_scan, so we really don't need locks
132 * here.
134 * However, if we are currently mounting the superblock, the underlying
135 * filesystem might be in a state of partial construction and hence it
136 * is dangerous to access it. trylock_super() uses a SB_BORN check to
137 * avoid this situation, so do the same here. The memory barrier is
138 * matched with the one in vfs_get_tree() as we don't hold locks here.
140 if (!(sb->s_flags & SB_BORN))
144 if (sb->s_op && sb->s_op->nr_cached_objects)
145 total_objects = sb->s_op->nr_cached_objects(sb, sc);
147 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
148 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
153 total_objects = vfs_pressure_ratio(total_objects);
154 return total_objects;
157 static void destroy_super_work(struct work_struct *work)
159 struct super_block *s = container_of(work, struct super_block,
163 for (i = 0; i < SB_FREEZE_LEVELS; i++)
164 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
168 static void destroy_super_rcu(struct rcu_head *head)
170 struct super_block *s = container_of(head, struct super_block, rcu);
171 INIT_WORK(&s->destroy_work, destroy_super_work);
172 schedule_work(&s->destroy_work);
175 /* Free a superblock that has never been seen by anyone */
176 static void destroy_unused_super(struct super_block *s)
180 up_write(&s->s_umount);
181 list_lru_destroy(&s->s_dentry_lru);
182 list_lru_destroy(&s->s_inode_lru);
184 put_user_ns(s->s_user_ns);
186 free_prealloced_shrinker(&s->s_shrink);
187 /* no delays needed */
188 destroy_super_work(&s->destroy_work);
192 * alloc_super - create new superblock
193 * @type: filesystem type superblock should belong to
194 * @flags: the mount flags
195 * @user_ns: User namespace for the super_block
197 * Allocates and initializes a new &struct super_block. alloc_super()
198 * returns a pointer to a new superblock, or %NULL if the allocation fails.
200 static struct super_block *alloc_super(struct file_system_type *type, int flags,
201 struct user_namespace *user_ns)
203 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
204 static const struct super_operations default_op;
210 INIT_LIST_HEAD(&s->s_mounts);
211 s->s_user_ns = get_user_ns(user_ns);
212 init_rwsem(&s->s_umount);
213 lockdep_set_class(&s->s_umount, &type->s_umount_key);
215 * sget() can have s_umount recursion.
217 * When it cannot find a suitable sb, it allocates a new
218 * one (this one), and tries again to find a suitable old one.
221 * In case that succeeds, it will acquire the s_umount
222 * lock of the old one. Since these are clearly distinct
223 * locks, and this object isn't exposed yet, there's no
224 * risk of a deadlock.
226 * Annotate this by putting this lock in a different
227 * subclass.
229 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
231 if (security_sb_alloc(s))
234 for (i = 0; i < SB_FREEZE_LEVELS; i++) {
235 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
237 &type->s_writers_key[i]))
240 init_waitqueue_head(&s->s_writers.wait_unfrozen);
241 s->s_bdi = &noop_backing_dev_info;
243 if (s->s_user_ns != &init_user_ns)
244 s->s_iflags |= SB_I_NODEV;
245 INIT_HLIST_NODE(&s->s_instances);
246 INIT_HLIST_BL_HEAD(&s->s_roots);
247 mutex_init(&s->s_sync_lock);
248 INIT_LIST_HEAD(&s->s_inodes);
249 spin_lock_init(&s->s_inode_list_lock);
250 INIT_LIST_HEAD(&s->s_inodes_wb);
251 spin_lock_init(&s->s_inode_wblist_lock);
254 atomic_set(&s->s_active, 1);
255 mutex_init(&s->s_vfs_rename_mutex);
256 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
257 init_rwsem(&s->s_dquot.dqio_sem);
258 s->s_maxbytes = MAX_NON_LFS;
259 s->s_op = &default_op;
260 s->s_time_gran = 1000000000;
261 s->s_time_min = TIME64_MIN;
262 s->s_time_max = TIME64_MAX;
263 s->cleancache_poolid = CLEANCACHE_NO_POOL;
265 s->s_shrink.seeks = DEFAULT_SEEKS;
266 s->s_shrink.scan_objects = super_cache_scan;
267 s->s_shrink.count_objects = super_cache_count;
268 s->s_shrink.batch = 1024;
269 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
270 if (prealloc_shrinker(&s->s_shrink))
272 if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
274 if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
279 destroy_unused_super(s);
283 /* Superblock refcounting */
286 * Drop a superblock's refcount. The caller must hold sb_lock.
288 static void __put_super(struct super_block *s)
291 list_del_init(&s->s_list);
292 WARN_ON(s->s_dentry_lru.node);
293 WARN_ON(s->s_inode_lru.node);
294 WARN_ON(!list_empty(&s->s_mounts));
296 fscrypt_destroy_keyring(s);
297 put_user_ns(s->s_user_ns);
299 call_rcu(&s->rcu, destroy_super_rcu);
304 * put_super - drop a temporary reference to superblock
305 * @sb: superblock in question
307 * Drops a temporary reference, frees the superblock if there are no
308 * references left.
310 static void put_super(struct super_block *sb)
314 spin_unlock(&sb_lock);
319 * deactivate_locked_super - drop an active reference to superblock
320 * @s: superblock to deactivate
322 * Drops an active reference to superblock, converting it into a temporary
323 * one if there are no other active references left. In that case we
324 * tell the fs driver to shut it down and drop the temporary reference we
325 * had acquired.
327 * Caller holds exclusive lock on superblock; that lock is released.
329 void deactivate_locked_super(struct super_block *s)
331 struct file_system_type *fs = s->s_type;
332 if (atomic_dec_and_test(&s->s_active)) {
333 cleancache_invalidate_fs(s);
334 unregister_shrinker(&s->s_shrink);
338 * Since list_lru_destroy() may sleep, we cannot call it from
339 * put_super(), where we hold the sb_lock. Therefore we destroy
340 * the lru lists right now.
342 list_lru_destroy(&s->s_dentry_lru);
343 list_lru_destroy(&s->s_inode_lru);
348 up_write(&s->s_umount);
352 EXPORT_SYMBOL(deactivate_locked_super);
355 * deactivate_super - drop an active reference to superblock
356 * @s: superblock to deactivate
358 * Variant of deactivate_locked_super(), except that superblock is *not*
359 * locked by caller. If we are going to drop the final active reference,
360 * lock will be acquired prior to that.
362 void deactivate_super(struct super_block *s)
364 if (!atomic_add_unless(&s->s_active, -1, 1)) {
365 down_write(&s->s_umount);
366 deactivate_locked_super(s);
370 EXPORT_SYMBOL(deactivate_super);
373 * grab_super - acquire an active reference
374 * @s: reference we are trying to make active
376 * Tries to acquire an active reference. grab_super() is used when we
377 * had just found a superblock in super_blocks or fs_type->fs_supers
378 * and want to turn it into a full-blown active reference. grab_super()
379 * is called with sb_lock held and drops it. Returns 1 in case of
380 * success, 0 on failure (the superblock was already dead or
381 * dying when grab_super() was called). Note that this is only
382 * called for superblocks not in rundown mode (== ones still on ->fs_supers
383 * of their type), so increment of ->s_count is OK here.
385 static int grab_super(struct super_block *s) __releases(sb_lock)
388 spin_unlock(&sb_lock);
389 down_write(&s->s_umount);
390 if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
394 up_write(&s->s_umount);
400 * trylock_super - try to grab ->s_umount shared
401 * @sb: reference we are trying to grab
403 * Try to prevent fs shutdown. This is used in places where we
404 * cannot take an active reference but we need to ensure that the
405 * filesystem is not shut down while we are working on it. It returns
406 * false if we cannot acquire s_umount or if we lose the race and
407 * filesystem already got into shutdown, and returns true with the s_umount
408 * lock held in read mode in case of success. On successful return,
409 * the caller must drop the s_umount lock when done.
411 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
412 * The reason this is safe is that we are OK with doing trylock instead
413 * of down_read(). There are a couple of places that are OK with that, but
414 * it's very much not a general-purpose interface.
416 bool trylock_super(struct super_block *sb)
418 if (down_read_trylock(&sb->s_umount)) {
419 if (!hlist_unhashed(&sb->s_instances) &&
420 sb->s_root && (sb->s_flags & SB_BORN))
422 up_read(&sb->s_umount);
429 * generic_shutdown_super - common helper for ->kill_sb()
430 * @sb: superblock to kill
432 * generic_shutdown_super() does all fs-independent work on superblock
433 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
434 * that need destruction out of superblock, call generic_shutdown_super()
435 * and release aforementioned objects. Note: dentries and inodes _are_
436 * taken care of and do not need specific handling.
438 * Upon calling this function, the filesystem may no longer alter or
439 * rearrange the set of dentries belonging to this super_block, nor may it
440 * change the attachments of dentries to inodes.
442 void generic_shutdown_super(struct super_block *sb)
444 const struct super_operations *sop = sb->s_op;
447 shrink_dcache_for_umount(sb);
449 sb->s_flags &= ~SB_ACTIVE;
451 cgroup_writeback_umount();
453 /* evict all inodes with zero refcount */
455 /* only nonzero refcount inodes can have marks */
456 fsnotify_sb_delete(sb);
457 fscrypt_destroy_keyring(sb);
459 if (sb->s_dio_done_wq) {
460 destroy_workqueue(sb->s_dio_done_wq);
461 sb->s_dio_done_wq = NULL;
467 if (!list_empty(&sb->s_inodes)) {
468 printk("VFS: Busy inodes after unmount of %s. "
469 "Self-destruct in 5 seconds. Have a nice day...\n",
474 /* unhash it; superblock list walkers skip entries with an unhashed ->s_instances */
475 hlist_del_init(&sb->s_instances);
476 spin_unlock(&sb_lock);
477 up_write(&sb->s_umount);
478 if (sb->s_bdi != &noop_backing_dev_info) {
480 sb->s_bdi = &noop_backing_dev_info;
484 EXPORT_SYMBOL(generic_shutdown_super);
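/*
 * Usage sketch (illustrative, not part of this file): per the comment above,
 * a typical ->kill_sb() tears down fs-private state around
 * generic_shutdown_super().  The "examplefs" names are hypothetical.
 *
 *	static void examplefs_kill_sb(struct super_block *sb)
 *	{
 *		struct examplefs_info *info = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);	// dentries/inodes handled here
 *		kfree(info);			// fs-private data freed afterwards
 *	}
 */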
486 bool mount_capable(struct fs_context *fc)
488 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
489 return capable(CAP_SYS_ADMIN);
491 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
495 * sget_fc - Find or create a superblock
496 * @fc: Filesystem context.
497 * @test: Comparison callback
498 * @set: Setup callback
500 * Find or create a superblock using the parameters stored in the filesystem
501 * context and the two callback functions.
503 * If an extant superblock is matched, then that will be returned with an
504 * elevated reference count that the caller must transfer or discard.
506 * If no match is made, a new superblock will be allocated and basic
507 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
508 * the set() callback will be invoked), the superblock will be published and it
509 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
510 * yet unset.
512 struct super_block *sget_fc(struct fs_context *fc,
513 int (*test)(struct super_block *, struct fs_context *),
514 int (*set)(struct super_block *, struct fs_context *))
516 struct super_block *s = NULL;
517 struct super_block *old;
518 struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
524 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
526 goto share_extant_sb;
530 spin_unlock(&sb_lock);
531 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
533 return ERR_PTR(-ENOMEM);
537 s->s_fs_info = fc->s_fs_info;
541 spin_unlock(&sb_lock);
542 destroy_unused_super(s);
545 fc->s_fs_info = NULL;
546 s->s_type = fc->fs_type;
547 s->s_iflags |= fc->s_iflags;
548 strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
549 list_add_tail(&s->s_list, &super_blocks);
550 hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
551 spin_unlock(&sb_lock);
552 get_filesystem(s->s_type);
553 register_shrinker_prepared(&s->s_shrink);
557 if (user_ns != old->s_user_ns) {
558 spin_unlock(&sb_lock);
559 destroy_unused_super(s);
560 return ERR_PTR(-EBUSY);
562 if (!grab_super(old))
564 destroy_unused_super(s);
567 EXPORT_SYMBOL(sget_fc);
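/*
 * Usage sketch (illustrative, hypothetical "examplefs" names): a ->get_tree()
 * implementation that needs its own matching rule can call sget_fc()
 * directly, mirroring what vfs_get_super() does below.  This sketch assumes
 * the caller has stashed its search key in fc->s_fs_info.
 *
 *	static int examplefs_test_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		return sb->s_fs_info == fc->s_fs_info;
 *	}
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		struct super_block *sb;
 *		int err;
 *
 *		sb = sget_fc(fc, examplefs_test_super, set_anon_super_fc);
 *		if (IS_ERR(sb))
 *			return PTR_ERR(sb);
 *		if (!sb->s_root) {			// freshly allocated
 *			err = examplefs_fill_super(sb, fc);	// hypothetical
 *			if (err) {
 *				deactivate_locked_super(sb);
 *				return err;
 *			}
 *			sb->s_flags |= SB_ACTIVE;
 *		}
 *		fc->root = dget(sb->s_root);
 *		return 0;
 *	}
 */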
570 * sget - find or create a superblock
571 * @type: filesystem type superblock should belong to
572 * @test: comparison callback
573 * @set: setup callback
574 * @flags: mount flags
575 * @data: argument to each of them
577 struct super_block *sget(struct file_system_type *type,
578 int (*test)(struct super_block *,void *),
579 int (*set)(struct super_block *,void *),
583 struct user_namespace *user_ns = current_user_ns();
584 struct super_block *s = NULL;
585 struct super_block *old;
588 /* We don't yet pass the user namespace of the parent
589 * mount through to here so always use &init_user_ns
590 * until that changes.
592 if (flags & SB_SUBMOUNT)
593 user_ns = &init_user_ns;
598 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
599 if (!test(old, data))
601 if (user_ns != old->s_user_ns) {
602 spin_unlock(&sb_lock);
603 destroy_unused_super(s);
604 return ERR_PTR(-EBUSY);
606 if (!grab_super(old))
608 destroy_unused_super(s);
613 spin_unlock(&sb_lock);
614 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
616 return ERR_PTR(-ENOMEM);
622 spin_unlock(&sb_lock);
623 destroy_unused_super(s);
627 strlcpy(s->s_id, type->name, sizeof(s->s_id));
628 list_add_tail(&s->s_list, &super_blocks);
629 hlist_add_head(&s->s_instances, &type->fs_supers);
630 spin_unlock(&sb_lock);
631 get_filesystem(type);
632 register_shrinker_prepared(&s->s_shrink);
637 void drop_super(struct super_block *sb)
639 up_read(&sb->s_umount);
643 EXPORT_SYMBOL(drop_super);
645 void drop_super_exclusive(struct super_block *sb)
647 up_write(&sb->s_umount);
650 EXPORT_SYMBOL(drop_super_exclusive);
652 static void __iterate_supers(void (*f)(struct super_block *))
654 struct super_block *sb, *p = NULL;
657 list_for_each_entry(sb, &super_blocks, s_list) {
658 if (hlist_unhashed(&sb->s_instances))
661 spin_unlock(&sb_lock);
672 spin_unlock(&sb_lock);
675 * iterate_supers - call function for all active superblocks
676 * @f: function to call
677 * @arg: argument to pass to it
679 * Scans the superblock list and calls the given function, passing it the
680 * locked superblock and the given argument.
682 void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
684 struct super_block *sb, *p = NULL;
687 list_for_each_entry(sb, &super_blocks, s_list) {
688 if (hlist_unhashed(&sb->s_instances))
691 spin_unlock(&sb_lock);
693 down_read(&sb->s_umount);
694 if (sb->s_root && (sb->s_flags & SB_BORN))
696 up_read(&sb->s_umount);
705 spin_unlock(&sb_lock);
709 * iterate_supers_type - call function for superblocks of given type
710 * @type: fs type
711 * @f: function to call
712 * @arg: argument to pass to it
714 * Scans the superblock list and calls the given function, passing it the
715 * locked superblock and the given argument.
717 void iterate_supers_type(struct file_system_type *type,
718 void (*f)(struct super_block *, void *), void *arg)
720 struct super_block *sb, *p = NULL;
723 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
725 spin_unlock(&sb_lock);
727 down_read(&sb->s_umount);
728 if (sb->s_root && (sb->s_flags & SB_BORN))
730 up_read(&sb->s_umount);
739 spin_unlock(&sb_lock);
742 EXPORT_SYMBOL(iterate_supers_type);
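/*
 * Usage sketch (illustrative): the callback passed to iterate_supers() runs
 * with s_umount held shared and is only invoked for born superblocks; a
 * hypothetical "sync everything writable" pass would look like:
 *
 *	static void example_sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(example_sync_one_sb, NULL);	// from process context
 */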
744 static struct super_block *__get_super(struct block_device *bdev, bool excl)
746 struct super_block *sb;
753 list_for_each_entry(sb, &super_blocks, s_list) {
754 if (hlist_unhashed(&sb->s_instances))
756 if (sb->s_bdev == bdev) {
758 spin_unlock(&sb_lock);
760 down_read(&sb->s_umount);
762 down_write(&sb->s_umount);
764 if (sb->s_root && (sb->s_flags & SB_BORN))
767 up_read(&sb->s_umount);
769 up_write(&sb->s_umount);
770 /* nope, got unmounted */
776 spin_unlock(&sb_lock);
781 * get_super - get the superblock of a device
782 * @bdev: device to get the superblock for
784 * Scans the superblock list and finds the superblock of the file system
785 * mounted on the device given. %NULL is returned if no match is found.
787 struct super_block *get_super(struct block_device *bdev)
789 return __get_super(bdev, false);
791 EXPORT_SYMBOL(get_super);
793 static struct super_block *__get_super_thawed(struct block_device *bdev,
797 struct super_block *s = __get_super(bdev, excl);
798 if (!s || s->s_writers.frozen == SB_UNFROZEN)
801 up_read(&s->s_umount);
803 up_write(&s->s_umount);
804 wait_event(s->s_writers.wait_unfrozen,
805 s->s_writers.frozen == SB_UNFROZEN);
811 * get_super_thawed - get thawed superblock of a device
812 * @bdev: device to get the superblock for
814 * Scans the superblock list and finds the superblock of the file system
815 * mounted on the device. The superblock is returned once it is thawed
816 * (or immediately if it was not frozen). %NULL is returned if no match
817 * is found.
819 struct super_block *get_super_thawed(struct block_device *bdev)
821 return __get_super_thawed(bdev, false);
823 EXPORT_SYMBOL(get_super_thawed);
826 * get_super_exclusive_thawed - get thawed superblock of a device
827 * @bdev: device to get the superblock for
829 * Scans the superblock list and finds the superblock of the file system
830 * mounted on the device. The superblock is returned once it is thawed
831 * (or immediately if it was not frozen) and s_umount semaphore is held
832 * in exclusive mode. %NULL is returned if no match is found.
834 struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
836 return __get_super_thawed(bdev, true);
838 EXPORT_SYMBOL(get_super_exclusive_thawed);
841 * get_active_super - get an active reference to the superblock of a device
842 * @bdev: device to get the superblock for
844 * Scans the superblock list and finds the superblock of the file system
845 * mounted on the device given. Returns the superblock with an active
846 * reference or %NULL if none was found.
848 struct super_block *get_active_super(struct block_device *bdev)
850 struct super_block *sb;
857 list_for_each_entry(sb, &super_blocks, s_list) {
858 if (hlist_unhashed(&sb->s_instances))
860 if (sb->s_bdev == bdev) {
863 up_write(&sb->s_umount);
867 spin_unlock(&sb_lock);
871 struct super_block *user_get_super(dev_t dev)
873 struct super_block *sb;
877 list_for_each_entry(sb, &super_blocks, s_list) {
878 if (hlist_unhashed(&sb->s_instances))
880 if (sb->s_dev == dev) {
882 spin_unlock(&sb_lock);
883 down_read(&sb->s_umount);
885 if (sb->s_root && (sb->s_flags & SB_BORN))
887 up_read(&sb->s_umount);
888 /* nope, got unmounted */
894 spin_unlock(&sb_lock);
899 * reconfigure_super - asks filesystem to change superblock parameters
900 * @fc: The superblock and configuration
902 * Alters the configuration parameters of a live superblock.
904 int reconfigure_super(struct fs_context *fc)
906 struct super_block *sb = fc->root->d_sb;
908 bool remount_ro = false;
909 bool remount_rw = false;
910 bool force = fc->sb_flags & SB_FORCE;
912 if (fc->sb_flags_mask & ~MS_RMT_MASK)
914 if (sb->s_writers.frozen != SB_UNFROZEN)
917 retval = security_sb_remount(sb, fc->security);
921 if (fc->sb_flags_mask & SB_RDONLY) {
923 if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
926 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
927 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
931 if (!hlist_empty(&sb->s_pins)) {
932 up_write(&sb->s_umount);
933 group_pin_kill(&sb->s_pins);
934 down_write(&sb->s_umount);
937 if (sb->s_writers.frozen != SB_UNFROZEN)
939 remount_ro = !sb_rdonly(sb);
942 shrink_dcache_sb(sb);
944 /* If we are reconfiguring to RDONLY and current sb is read/write,
945 * make sure there are no files open for writing.
949 sb->s_readonly_remount = 1;
952 retval = sb_prepare_remount_readonly(sb);
956 } else if (remount_rw) {
958 * We set s_readonly_remount here to protect filesystem's
959 * reconfigure code from writes from userspace until
960 * reconfigure finishes.
962 sb->s_readonly_remount = 1;
966 if (fc->ops->reconfigure) {
967 retval = fc->ops->reconfigure(fc);
970 goto cancel_readonly;
971 /* If forced remount, go ahead despite any errors */
972 WARN(1, "forced remount of a %s fs returned %i\n",
973 sb->s_type->name, retval);
977 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
978 (fc->sb_flags & fc->sb_flags_mask)));
979 /* Needs to be ordered wrt mnt_is_readonly() */
981 sb->s_readonly_remount = 0;
984 * Some filesystems modify their metadata via some other path than the
985 * bdev buffer cache (eg. use a private mapping, or directories in
986 * pagecache, etc). Also file data modifications go via their own
987 * mappings. So if we remount readonly and then copy the filesystem
988 * from bdev, we could get stale data, so invalidate it to give a best
989 * effort at coherency.
991 if (remount_ro && sb->s_bdev)
992 invalidate_bdev(sb->s_bdev);
996 sb->s_readonly_remount = 0;
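/*
 * Usage sketch (illustrative): the ->reconfigure() hook invoked above is the
 * filesystem's half of remount.  A minimal, hypothetical handler that only
 * supports flipping between read-only and read-write could be:
 *
 *	static int examplefs_reconfigure(struct fs_context *fc)
 *	{
 *		struct super_block *sb = fc->root->d_sb;
 *
 *		sync_filesystem(sb);		// flush before switching modes
 *		if (fc->sb_flags & SB_RDONLY)
 *			return 0;		// going read-only: nothing more to do
 *		if (sb_rdonly(sb))
 *			return examplefs_make_writable(sb);	// hypothetical helper
 *		return 0;
 *	}
 */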
1000 static void do_emergency_remount_callback(struct super_block *sb)
1002 down_write(&sb->s_umount);
1003 if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
1005 struct fs_context *fc;
1007 fc = fs_context_for_reconfigure(sb->s_root,
1008 SB_RDONLY | SB_FORCE, SB_RDONLY);
1010 if (parse_monolithic_mount_data(fc, NULL) == 0)
1011 (void)reconfigure_super(fc);
1015 up_write(&sb->s_umount);
1018 static void do_emergency_remount(struct work_struct *work)
1020 __iterate_supers(do_emergency_remount_callback);
1022 printk("Emergency Remount complete\n");
1025 void emergency_remount(void)
1027 struct work_struct *work;
1029 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1031 INIT_WORK(work, do_emergency_remount);
1032 schedule_work(work);
1036 static void do_thaw_all_callback(struct super_block *sb)
1038 down_write(&sb->s_umount);
1039 if (sb->s_root && sb->s_flags & SB_BORN) {
1040 emergency_thaw_bdev(sb);
1041 thaw_super_locked(sb);
1043 up_write(&sb->s_umount);
1047 static void do_thaw_all(struct work_struct *work)
1049 __iterate_supers(do_thaw_all_callback);
1051 printk(KERN_WARNING "Emergency Thaw complete\n");
1055 * emergency_thaw_all -- forcibly thaw every frozen filesystem
1057 * Used for emergency unfreeze of all filesystems via SysRq
1059 void emergency_thaw_all(void)
1061 struct work_struct *work;
1063 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1065 INIT_WORK(work, do_thaw_all);
1066 schedule_work(work);
1070 static DEFINE_IDA(unnamed_dev_ida);
1073 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
1074 * @p: Pointer to a dev_t.
1076 * Filesystems which don't use real block devices can call this function
1077 * to allocate a virtual block device.
1079 * Context: Any context. Frequently called while holding sb_lock.
1080 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
1081 * or -ENOMEM if memory allocation failed.
1083 int get_anon_bdev(dev_t *p)
1088 * Many userspace utilities consider an FSID of 0 invalid.
1089 * Always return at least 1 from get_anon_bdev.
1091 dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
1101 EXPORT_SYMBOL(get_anon_bdev);
1103 void free_anon_bdev(dev_t dev)
1105 ida_free(&unnamed_dev_ida, MINOR(dev));
1107 EXPORT_SYMBOL(free_anon_bdev);
1109 int set_anon_super(struct super_block *s, void *data)
1111 return get_anon_bdev(&s->s_dev);
1113 EXPORT_SYMBOL(set_anon_super);
1115 void kill_anon_super(struct super_block *sb)
1117 dev_t dev = sb->s_dev;
1118 generic_shutdown_super(sb);
1119 free_anon_bdev(dev);
1121 EXPORT_SYMBOL(kill_anon_super);
1123 void kill_litter_super(struct super_block *sb)
1126 d_genocide(sb->s_root);
1127 kill_anon_super(sb);
1129 EXPORT_SYMBOL(kill_litter_super);
1131 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1133 return set_anon_super(sb, NULL);
1135 EXPORT_SYMBOL(set_anon_super_fc);
1137 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1139 return sb->s_fs_info == fc->s_fs_info;
1142 static int test_single_super(struct super_block *s, struct fs_context *fc)
1148 * vfs_get_super - Get a superblock with a search key set in s_fs_info.
1149 * @fc: The filesystem context holding the parameters
1150 * @keying: How to distinguish superblocks
1151 * @fill_super: Helper to initialise a new superblock
1153 * Search for a superblock and create a new one if not found. The search
1154 * criterion is controlled by @keying. If the search fails, a new superblock
1155 * is created and @fill_super() is called to initialise it.
1157 * @keying can take one of a number of values:
1159 * (1) vfs_get_single_super - Only one superblock of this type may exist on the
1160 * system. This is typically used for special system filesystems.
1162 * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must have
1163 * distinct keys (where the key is in s_fs_info). Searching for the same
1164 * key again will turn up the superblock for that key.
1166 * (3) vfs_get_independent_super - Multiple superblocks may exist and are
1167 * unkeyed. Each call will get a new superblock.
1169 * A permissions check is made by sget_fc() unless we're getting a superblock
1170 * for a kernel-internal mount or a submount.
1172 int vfs_get_super(struct fs_context *fc,
1173 enum vfs_get_super_keying keying,
1174 int (*fill_super)(struct super_block *sb,
1175 struct fs_context *fc))
1177 int (*test)(struct super_block *, struct fs_context *);
1178 struct super_block *sb;
1182 case vfs_get_single_super:
1183 case vfs_get_single_reconf_super:
1184 test = test_single_super;
1186 case vfs_get_keyed_super:
1187 test = test_keyed_super;
1189 case vfs_get_independent_super:
1196 sb = sget_fc(fc, test, set_anon_super_fc);
1201 err = fill_super(sb, fc);
1205 sb->s_flags |= SB_ACTIVE;
1206 fc->root = dget(sb->s_root);
1208 fc->root = dget(sb->s_root);
1209 if (keying == vfs_get_single_reconf_super) {
1210 err = reconfigure_super(fc);
1222 deactivate_locked_super(sb);
1225 EXPORT_SYMBOL(vfs_get_super);
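/*
 * Usage sketch (illustrative): filesystems normally reach vfs_get_super()
 * through the get_tree_*() wrappers below.  A hypothetical filesystem keyed
 * on some namespace object would do:
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		// set s_op and s_magic, then create the root inode and dentry
 *		return 0;
 *	}
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		// one superblock per key; the same key finds the same sb
 *		return get_tree_keyed(fc, examplefs_fill_super,
 *				      examplefs_key(fc));	// hypothetical key
 *	}
 */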
1227 int get_tree_nodev(struct fs_context *fc,
1228 int (*fill_super)(struct super_block *sb,
1229 struct fs_context *fc))
1231 return vfs_get_super(fc, vfs_get_independent_super, fill_super);
1233 EXPORT_SYMBOL(get_tree_nodev);
1235 int get_tree_single(struct fs_context *fc,
1236 int (*fill_super)(struct super_block *sb,
1237 struct fs_context *fc))
1239 return vfs_get_super(fc, vfs_get_single_super, fill_super);
1241 EXPORT_SYMBOL(get_tree_single);
1243 int get_tree_single_reconf(struct fs_context *fc,
1244 int (*fill_super)(struct super_block *sb,
1245 struct fs_context *fc))
1247 return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
1249 EXPORT_SYMBOL(get_tree_single_reconf);
1251 int get_tree_keyed(struct fs_context *fc,
1252 int (*fill_super)(struct super_block *sb,
1253 struct fs_context *fc),
1256 fc->s_fs_info = key;
1257 return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
1259 EXPORT_SYMBOL(get_tree_keyed);
1263 static int set_bdev_super(struct super_block *s, void *data)
1266 s->s_dev = s->s_bdev->bd_dev;
1267 s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
1269 if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
1270 s->s_iflags |= SB_I_STABLE_WRITES;
1274 static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1276 return set_bdev_super(s, fc->sget_key);
1279 static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1281 return s->s_bdev == fc->sget_key;
1285 * get_tree_bdev - Get a superblock based on a single block device
1286 * @fc: The filesystem context holding the parameters
1287 * @fill_super: Helper to initialise a new superblock
1289 int get_tree_bdev(struct fs_context *fc,
1290 int (*fill_super)(struct super_block *,
1291 struct fs_context *))
1293 struct block_device *bdev;
1294 struct super_block *s;
1295 fmode_t mode = FMODE_READ | FMODE_EXCL;
1298 if (!(fc->sb_flags & SB_RDONLY))
1299 mode |= FMODE_WRITE;
1302 return invalf(fc, "No source specified");
1304 bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
1306 errorf(fc, "%s: Can't open blockdev", fc->source);
1307 return PTR_ERR(bdev);
1310 /* Once the superblock is inserted into the list by sget_fc(), s_umount
1311 * will protect the lockfs code from trying to start a snapshot while
1312 * we are mounting.
1314 mutex_lock(&bdev->bd_fsfreeze_mutex);
1315 if (bdev->bd_fsfreeze_count > 0) {
1316 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1317 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1318 blkdev_put(bdev, mode);
1322 fc->sb_flags |= SB_NOSEC;
1323 fc->sget_key = bdev;
1324 s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
1325 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1327 blkdev_put(bdev, mode);
1332 /* Don't summarily change the RO/RW state. */
1333 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1334 warnf(fc, "%pg: Can't mount, would change RO state", bdev);
1335 deactivate_locked_super(s);
1336 blkdev_put(bdev, mode);
1341 * s_umount nests inside bd_mutex during
1342 * __invalidate_device(). blkdev_put() acquires
1343 * bd_mutex and can't be called under s_umount. Drop
1344 * s_umount temporarily. This is safe as we're
1345 * holding an active reference.
1347 up_write(&s->s_umount);
1348 blkdev_put(bdev, mode);
1349 down_write(&s->s_umount);
1352 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1353 sb_set_blocksize(s, block_size(bdev));
1354 error = fill_super(s, fc);
1356 deactivate_locked_super(s);
1360 s->s_flags |= SB_ACTIVE;
1365 fc->root = dget(s->s_root);
1368 EXPORT_SYMBOL(get_tree_bdev);
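/*
 * Usage sketch (illustrative, hypothetical "examplefs" names): a block-device
 * filesystem points its ->get_tree() at get_tree_bdev() and supplies a
 * fill_super helper; the block device is already opened and s_id/blocksize
 * set up by the time fill_super runs.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		return examplefs_read_ondisk_super(sb);	// hypothetical
 *	}
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, examplefs_fill_super);
 *	}
 */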
1370 static int test_bdev_super(struct super_block *s, void *data)
1372 return (void *)s->s_bdev == data;
1375 struct dentry *mount_bdev(struct file_system_type *fs_type,
1376 int flags, const char *dev_name, void *data,
1377 int (*fill_super)(struct super_block *, void *, int))
1379 struct block_device *bdev;
1380 struct super_block *s;
1381 fmode_t mode = FMODE_READ | FMODE_EXCL;
1384 if (!(flags & SB_RDONLY))
1385 mode |= FMODE_WRITE;
1387 bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1389 return ERR_CAST(bdev);
1392 * once the super is inserted into the list by sget, s_umount
1393 * will protect the lockfs code from trying to start a snapshot
1394 * while we are mounting
1396 mutex_lock(&bdev->bd_fsfreeze_mutex);
1397 if (bdev->bd_fsfreeze_count > 0) {
1398 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1402 s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
1404 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1409 if ((flags ^ s->s_flags) & SB_RDONLY) {
1410 deactivate_locked_super(s);
1416 * s_umount nests inside bd_mutex during
1417 * __invalidate_device(). blkdev_put() acquires
1418 * bd_mutex and can't be called under s_umount. Drop
1419 * s_umount temporarily. This is safe as we're
1420 * holding an active reference.
1422 up_write(&s->s_umount);
1423 blkdev_put(bdev, mode);
1424 down_write(&s->s_umount);
1427 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1428 sb_set_blocksize(s, block_size(bdev));
1429 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1431 deactivate_locked_super(s);
1435 s->s_flags |= SB_ACTIVE;
1439 return dget(s->s_root);
1444 blkdev_put(bdev, mode);
1446 return ERR_PTR(error);
1448 EXPORT_SYMBOL(mount_bdev);
1450 void kill_block_super(struct super_block *sb)
1452 struct block_device *bdev = sb->s_bdev;
1453 fmode_t mode = sb->s_mode;
1455 bdev->bd_super = NULL;
1456 generic_shutdown_super(sb);
1457 sync_blockdev(bdev);
1458 WARN_ON_ONCE(!(mode & FMODE_EXCL));
1459 blkdev_put(bdev, mode | FMODE_EXCL);
1462 EXPORT_SYMBOL(kill_block_super);
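/*
 * Usage sketch (illustrative, hypothetical "examplefs" names): filesystems
 * still on the legacy mount API pair mount_bdev() with kill_block_super()
 * in their file_system_type.
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		// parse options in 'data', read the on-disk super, set s_root
 *		return 0;
 *	}
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  examplefs_fill_super);
 *	}
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "examplefs",
 *		.mount		= examplefs_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */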
1465 struct dentry *mount_nodev(struct file_system_type *fs_type,
1466 int flags, void *data,
1467 int (*fill_super)(struct super_block *, void *, int))
1470 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1475 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1477 deactivate_locked_super(s);
1478 return ERR_PTR(error);
1480 s->s_flags |= SB_ACTIVE;
1481 return dget(s->s_root);
1483 EXPORT_SYMBOL(mount_nodev);
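/*
 * Usage sketch (illustrative): small in-memory filesystems typically pair
 * mount_nodev() (or mount_single()) with kill_litter_super(), which drops
 * the dentries the filesystem itself pinned before kill_anon_super() runs.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, examplefs_fill_super);
 *	}
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.name		= "examplefs",
 *		.mount		= examplefs_mount,
 *		.kill_sb	= kill_litter_super,
 *	};
 */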
1485 int reconfigure_single(struct super_block *s,
1486 int flags, void *data)
1488 struct fs_context *fc;
1491 /* The caller really needs to be passing fc down into mount_single(),
1492 * then a chunk of this can be removed. [Bollocks -- AV]
1493 * Better yet, reconfiguration shouldn't happen, but rather the second
1494 * mount should be rejected if the parameters are not compatible.
1496 fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
1500 ret = parse_monolithic_mount_data(fc, data);
1504 ret = reconfigure_super(fc);
1510 static int compare_single(struct super_block *s, void *p)
1515 struct dentry *mount_single(struct file_system_type *fs_type,
1516 int flags, void *data,
1517 int (*fill_super)(struct super_block *, void *, int))
1519 struct super_block *s;
1522 s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1526 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1528 s->s_flags |= SB_ACTIVE;
1530 error = reconfigure_single(s, flags, data);
1532 if (unlikely(error)) {
1533 deactivate_locked_super(s);
1534 return ERR_PTR(error);
1536 return dget(s->s_root);
1538 EXPORT_SYMBOL(mount_single);
1541 * vfs_get_tree - Get the mountable root
1542 * @fc: The superblock configuration context.
1544 * The filesystem is invoked to get or create a superblock which can then later
1545 * be used for mounting. The filesystem places a pointer to the root to be
1546 * used for mounting in @fc->root.
1548 int vfs_get_tree(struct fs_context *fc)
1550 struct super_block *sb;
1556 /* Get the mountable root in fc->root, with a ref on the root and a ref
1557 * on the superblock.
1559 error = fc->ops->get_tree(fc);
1564 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
1566 /* We don't know what the locking state of the superblock is -
1567 * if there is a superblock.
1572 sb = fc->root->d_sb;
1573 WARN_ON(!sb->s_bdi);
1576 * Write barrier is for super_cache_count(). We place it before setting
1577 * SB_BORN as the data dependency between the two functions is the
1578 * superblock structure contents that we just set up, not the SB_BORN
1579 * flag.
1582 sb->s_flags |= SB_BORN;
1584 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1585 if (unlikely(error)) {
1591 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1592 * but s_maxbytes was an unsigned long long for many releases. Throw
1593 * this warning for a little while to try and catch filesystems that
1594 * violate this rule.
1596 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1597 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1601 EXPORT_SYMBOL(vfs_get_tree);
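/*
 * Usage sketch (illustrative): kernel-internal mounts reach vfs_get_tree()
 * through a filesystem context; assuming the helpers declared in
 * <linux/fs_context.h> and <linux/mount.h>, a hypothetical caller would do:
 *
 *	struct fs_context *fc;
 *	struct vfsmount *mnt;
 *
 *	fc = fs_context_for_mount(&examplefs_fs_type, SB_KERNMOUNT);
 *	if (IS_ERR(fc))
 *		return ERR_CAST(fc);
 *	mnt = fc_mount(fc);	// calls vfs_get_tree() and mounts fc->root
 *	put_fs_context(fc);
 *	return mnt;
 */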
1604 * Setup private BDI for given superblock. It gets automatically cleaned up
1605 * in generic_shutdown_super().
1607 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1609 struct backing_dev_info *bdi;
1613 bdi = bdi_alloc(NUMA_NO_NODE);
1617 va_start(args, fmt);
1618 err = bdi_register_va(bdi, fmt, args);
1624 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1629 EXPORT_SYMBOL(super_setup_bdi_name);
1632 * Setup private BDI for given superblock. It gets automatically cleaned up
1633 * in generic_shutdown_super().
1635 int super_setup_bdi(struct super_block *sb)
1637 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1639 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1640 atomic_long_inc_return(&bdi_seq));
1642 EXPORT_SYMBOL(super_setup_bdi);
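/*
 * Usage sketch (illustrative): filesystems that do their own writeback
 * (network or FUSE-like) call super_setup_bdi_name() from fill_super so
 * per-sb dirty accounting has a real BDI.  Hypothetical example; fc->source
 * is assumed to be a usable identifier here.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		int err;
 *
 *		err = super_setup_bdi_name(sb, "examplefs-%s", fc->source);
 *		if (err)
 *			return err;
 *		sb->s_bdi->ra_pages = 0;	// tune readahead as needed
 *		return 0;	// bdi_put() happens in generic_shutdown_super()
 *	}
 */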
1645 * sb_wait_write - wait until all writers to given file system finish
1646 * @sb: the super for which we wait
1647 * @level: type of writers we wait for (normal vs page fault)
1649 * This function waits until there are no writers of given type to given file
1650 * system.
1652 static void sb_wait_write(struct super_block *sb, int level)
1654 percpu_down_write(sb->s_writers.rw_sem + level-1);
1658 * We are going to return to userspace and forget about these locks, the
1659 * ownership goes to the caller of thaw_super() which does unlock().
1661 static void lockdep_sb_freeze_release(struct super_block *sb)
1665 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1666 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1670 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1672 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1676 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1677 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1680 static void sb_freeze_unlock(struct super_block *sb, int level)
1682 for (level--; level >= 0; level--)
1683 percpu_up_write(sb->s_writers.rw_sem + level);
1687 * freeze_super - lock the filesystem and force it into a consistent state
1688 * @sb: the super to lock
1690 * Syncs the super to make sure the filesystem is consistent and calls the fs's
1691 * freeze_fs. Subsequent calls to this without first thawing the fs will return
1692 * -EBUSY.
1694 * During this function, sb->s_writers.frozen goes through these values:
1696 * SB_UNFROZEN: File system is normal, all writes progress as usual.
1698 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
1699 * writes should be blocked, though page faults are still allowed. We wait for
1700 * all writes to complete and then proceed to the next stage.
1702 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1703 * but internal fs threads can still modify the filesystem (although they
1704 * should not dirty new pages or inodes), writeback can run etc. After waiting
1705 * for all running page faults we sync the filesystem which will clean all
1706 * dirty pages and inodes (no new dirty pages or inodes can be created when
1707 * sync is running).
1709 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1710 * modification are blocked (e.g. XFS preallocation truncation on inode
1711 * reclaim). This is usually implemented by blocking new transactions for
1712 * filesystems that have them and need this additional guard. After all
1713 * internal writers are finished we call ->freeze_fs() to finish filesystem
1714 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1715 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1717 * sb->s_writers.frozen is protected by sb->s_umount.
1719 int freeze_super(struct super_block *sb)
1723 atomic_inc(&sb->s_active);
1724 down_write(&sb->s_umount);
1725 if (sb->s_writers.frozen != SB_UNFROZEN) {
1726 deactivate_locked_super(sb);
1730 if (!(sb->s_flags & SB_BORN)) {
1731 up_write(&sb->s_umount);
1732 return 0; /* sic - it's "nothing to do" */
1735 if (sb_rdonly(sb)) {
1736 /* Nothing to do really... */
1737 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1738 up_write(&sb->s_umount);
1742 sb->s_writers.frozen = SB_FREEZE_WRITE;
1743 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
1744 up_write(&sb->s_umount);
1745 sb_wait_write(sb, SB_FREEZE_WRITE);
1746 down_write(&sb->s_umount);
1748 /* Now we go and block page faults... */
1749 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1750 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
1752 /* All writers are done so after syncing there won't be dirty data */
1753 ret = sync_filesystem(sb);
1755 sb->s_writers.frozen = SB_UNFROZEN;
1756 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
1757 wake_up(&sb->s_writers.wait_unfrozen);
1758 deactivate_locked_super(sb);
1762 /* Now wait for internal filesystem counter */
1763 sb->s_writers.frozen = SB_FREEZE_FS;
1764 sb_wait_write(sb, SB_FREEZE_FS);
1766 if (sb->s_op->freeze_fs) {
1767 ret = sb->s_op->freeze_fs(sb);
1770 "VFS:Filesystem freeze failed\n");
1771 sb->s_writers.frozen = SB_UNFROZEN;
1772 sb_freeze_unlock(sb, SB_FREEZE_FS);
1773 wake_up(&sb->s_writers.wait_unfrozen);
1774 deactivate_locked_super(sb);
1779 * For debugging purposes so that fs can warn if it sees write activity
1780 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
1782 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1783 lockdep_sb_freeze_release(sb);
1784 up_write(&sb->s_umount);
1787 EXPORT_SYMBOL(freeze_super);
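/*
 * Usage sketch (illustrative): the write side that the freeze levels above
 * wait on is bracketed with sb_start_write()/sb_end_write() (or the pagefault
 * and intwrite variants), while a userspace-driven freeze such as the
 * FIFREEZE ioctl ultimately boils down to freeze_super()/thaw_super() on the
 * file's superblock.
 *
 *	// in a write path
 *	sb_start_write(sb);
 *	// modify the filesystem here
 *	sb_end_write(sb);
 *
 *	// ioctl-style freeze and later thaw (error handling omitted)
 *	freeze_super(file_inode(filp)->i_sb);
 *	thaw_super(file_inode(filp)->i_sb);
 */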
1790 * thaw_super -- unlock filesystem
1791 * @sb: the super to thaw
1793 * Unlocks the filesystem and marks it writeable again after freeze_super().
1795 static int thaw_super_locked(struct super_block *sb)
1799 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
1800 up_write(&sb->s_umount);
1804 if (sb_rdonly(sb)) {
1805 sb->s_writers.frozen = SB_UNFROZEN;
1809 lockdep_sb_freeze_acquire(sb);
1811 if (sb->s_op->unfreeze_fs) {
1812 error = sb->s_op->unfreeze_fs(sb);
1815 "VFS:Filesystem thaw failed\n");
1816 lockdep_sb_freeze_release(sb);
1817 up_write(&sb->s_umount);
1822 sb->s_writers.frozen = SB_UNFROZEN;
1823 sb_freeze_unlock(sb, SB_FREEZE_FS);
1825 wake_up(&sb->s_writers.wait_unfrozen);
1826 deactivate_locked_super(sb);
1830 int thaw_super(struct super_block *sb)
1832 down_write(&sb->s_umount);
1833 return thaw_super_locked(sb);
1835 EXPORT_SYMBOL(thaw_super);