// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/super.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "internal.h"
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
23 static struct kmem_cache *erofs_inode_cachep __read_mostly;
25 static void init_once(void *ptr)
27 struct erofs_vnode *vi = ptr;
29 inode_init_once(&vi->vfs_inode);
32 static int erofs_init_inode_cache(void)
34 erofs_inode_cachep = kmem_cache_create("erofs_inode",
35 sizeof(struct erofs_vnode), 0,
36 SLAB_RECLAIM_ACCOUNT, init_once);
38 return erofs_inode_cachep != NULL ? 0 : -ENOMEM;
41 static void erofs_exit_inode_cache(void)
43 kmem_cache_destroy(erofs_inode_cachep);
46 static struct inode *alloc_inode(struct super_block *sb)
48 struct erofs_vnode *vi =
49 kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
54 /* zero out everything except vfs_inode */
55 memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
56 return &vi->vfs_inode;
59 static void i_callback(struct rcu_head *head)
61 struct inode *inode = container_of(head, struct inode, i_rcu);
62 struct erofs_vnode *vi = EROFS_V(inode);
64 /* be careful RCU symlink path (see ext4_inode_info->i_data)! */
65 if (is_inode_fast_symlink(inode))
68 kfree(vi->xattr_shared_xattrs);
70 kmem_cache_free(erofs_inode_cachep, vi);
73 static void destroy_inode(struct inode *inode)
75 call_rcu(&inode->i_rcu, i_callback);
78 static bool check_layout_compatibility(struct super_block *sb,
79 struct erofs_super_block *layout)
81 const unsigned int requirements = le32_to_cpu(layout->requirements);
83 EROFS_SB(sb)->requirements = requirements;
85 /* check if current kernel meets all mandatory requirements */
86 if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
87 errln("unidentified requirements %x, please upgrade kernel version",
88 requirements & ~EROFS_ALL_REQUIREMENTS);
94 static int superblock_read(struct super_block *sb)
96 struct erofs_sb_info *sbi;
97 struct buffer_head *bh;
98 struct erofs_super_block *layout;
102 bh = sb_bread(sb, 0);
105 errln("cannot read erofs superblock");
110 layout = (struct erofs_super_block *)((u8 *)bh->b_data
111 + EROFS_SUPER_OFFSET);
114 if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
115 errln("cannot find valid erofs superblock");
119 blkszbits = layout->blkszbits;
120 /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
121 if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
122 errln("blksize %u isn't supported on this platform",
127 if (!check_layout_compatibility(sb, layout))
130 sbi->blocks = le32_to_cpu(layout->blocks);
131 sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
132 #ifdef CONFIG_EROFS_FS_XATTR
133 sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
135 sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
136 #ifdef CONFIG_EROFS_FS_ZIP
137 sbi->clusterbits = 12;
139 if (1 << (sbi->clusterbits - 12) > Z_EROFS_CLUSTER_MAX_PAGES)
140 errln("clusterbits %u is not supported on this kernel",
144 sbi->root_nid = le16_to_cpu(layout->root_nid);
145 sbi->inos = le64_to_cpu(layout->inos);
147 sbi->build_time = le64_to_cpu(layout->build_time);
148 sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);
150 memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
151 memcpy(sbi->volume_name, layout->volume_name,
152 sizeof(layout->volume_name));
#ifdef CONFIG_EROFS_FAULT_INJECTION
/* human-readable names for each injectable fault type */
char *erofs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
};

/*
 * Configure fault injection: a non-zero rate arms every fault type at
 * that rate; rate == 0 disables injection entirely.
 */
static void erofs_build_fault_attr(struct erofs_sb_info *sbi,
				   unsigned int rate)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct erofs_fault_info));
	}
}
#endif
/* set erofs default mount options before parsing user-supplied ones */
static void default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif

#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
200 static match_table_t erofs_tokens = {
201 {Opt_user_xattr, "user_xattr"},
202 {Opt_nouser_xattr, "nouser_xattr"},
204 {Opt_noacl, "noacl"},
205 {Opt_fault_injection, "fault_injection=%u"},
209 static int parse_options(struct super_block *sb, char *options)
211 substring_t args[MAX_OPT_ARGS];
218 while ((p = strsep(&options, ",")) != NULL) {
224 args[0].to = args[0].from = NULL;
225 token = match_token(p, erofs_tokens, args);
228 #ifdef CONFIG_EROFS_FS_XATTR
230 set_opt(EROFS_SB(sb), XATTR_USER);
232 case Opt_nouser_xattr:
233 clear_opt(EROFS_SB(sb), XATTR_USER);
237 infoln("user_xattr options not supported");
239 case Opt_nouser_xattr:
240 infoln("nouser_xattr options not supported");
243 #ifdef CONFIG_EROFS_FS_POSIX_ACL
245 set_opt(EROFS_SB(sb), POSIX_ACL);
248 clear_opt(EROFS_SB(sb), POSIX_ACL);
252 infoln("acl options not supported");
255 infoln("noacl options not supported");
258 case Opt_fault_injection:
259 if (args->from && match_int(args, &arg))
261 #ifdef CONFIG_EROFS_FAULT_INJECTION
262 erofs_build_fault_attr(EROFS_SB(sb), arg);
263 set_opt(EROFS_SB(sb), FAULT_INJECTION);
265 infoln("FAULT_INJECTION was not selected");
269 errln("Unrecognized mount option \"%s\" "
270 "or missing value", p);
#ifdef EROFS_FS_HAS_MANAGED_CACHE

static const struct address_space_operations managed_cache_aops;

/*
 * Try to release a managed-cache page; returns non-zero when freed,
 * 0 when the page is still busy.
 */
static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}

static void managed_cache_invalidatepage(struct page *page,
					 unsigned int offset,
					 unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	/* full-page invalidation: keep retrying until the page is freed */
	if (offset == 0 && stop == PAGE_SIZE)
		while (!managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = managed_cache_releasepage,
	.invalidatepage = managed_cache_invalidatepage,
};

/*
 * Allocate the pseudo-inode that backs the compressed managed cache.
 * Returns the inode or ERR_PTR(-ENOMEM).
 */
static struct inode *erofs_init_managed_cache(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (unlikely(inode == NULL))
		return ERR_PTR(-ENOMEM);

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM |
			     __GFP_MOVABLE | __GFP_NOFAIL);
	return inode;
}

#endif
334 static int erofs_read_super(struct super_block *sb,
335 const char *dev_name, void *data, int silent)
338 struct erofs_sb_info *sbi;
341 infoln("read_super, device -> %s", dev_name);
342 infoln("options -> %s", (char *)data);
344 if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
345 errln("failed to set erofs blksize");
349 sbi = kzalloc(sizeof(struct erofs_sb_info), GFP_KERNEL);
350 if (unlikely(sbi == NULL)) {
356 err = superblock_read(sb);
360 sb->s_magic = EROFS_SUPER_MAGIC;
361 sb->s_flags |= SB_RDONLY | SB_NOATIME;
362 sb->s_maxbytes = MAX_LFS_FILESIZE;
365 sb->s_op = &erofs_sops;
367 #ifdef CONFIG_EROFS_FS_XATTR
368 sb->s_xattr = erofs_xattr_handlers;
371 /* set erofs default mount options */
372 default_options(sbi);
374 err = parse_options(sb, data);
379 infoln("root inode @ nid %llu", ROOT_NID(sbi));
381 #ifdef CONFIG_EROFS_FS_ZIP
382 INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
385 #ifdef EROFS_FS_HAS_MANAGED_CACHE
386 sbi->managed_cache = erofs_init_managed_cache(sb);
387 if (IS_ERR(sbi->managed_cache)) {
388 err = PTR_ERR(sbi->managed_cache);
389 goto err_init_managed_cache;
393 /* get the root inode */
394 inode = erofs_iget(sb, ROOT_NID(sbi), true);
396 err = PTR_ERR(inode);
400 if (!S_ISDIR(inode->i_mode)) {
401 errln("rootino(nid %llu) is not a directory(i_mode %o)",
402 ROOT_NID(sbi), inode->i_mode);
407 sb->s_root = d_make_root(inode);
408 if (sb->s_root == NULL) {
413 /* save the device name to sbi */
414 sbi->dev_name = __getname();
415 if (sbi->dev_name == NULL) {
420 snprintf(sbi->dev_name, PATH_MAX, "%s", dev_name);
421 sbi->dev_name[PATH_MAX - 1] = '\0';
423 erofs_register_super(sb);
426 infoln("mounted on %s with opts: %s.", dev_name,
430 * please add a label for each exit point and use
431 * the following name convention, thus new features
432 * can be integrated easily without renaming labels.
438 if (sb->s_root == NULL)
441 #ifdef EROFS_FS_HAS_MANAGED_CACHE
442 iput(sbi->managed_cache);
443 err_init_managed_cache:
447 sb->s_fs_info = NULL;
/*
 * NOTE(review): this cleanup path could be triggered after
 * deactivate_locked_super() is called, thus including umount
 * and failed to initialize.
 */
457 static void erofs_put_super(struct super_block *sb)
459 struct erofs_sb_info *sbi = EROFS_SB(sb);
461 /* for cases which are failed in "read_super" */
465 WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
467 infoln("unmounted for %s", sbi->dev_name);
468 __putname(sbi->dev_name);
470 #ifdef EROFS_FS_HAS_MANAGED_CACHE
471 iput(sbi->managed_cache);
474 mutex_lock(&sbi->umount_mutex);
476 #ifdef CONFIG_EROFS_FS_ZIP
477 erofs_workstation_cleanup_all(sb);
480 erofs_unregister_super(sb);
481 mutex_unlock(&sbi->umount_mutex);
484 sb->s_fs_info = NULL;
/* bundles mount arguments so mount_bdev() can pass them as one pointer */
struct erofs_mount_private {
	const char *dev_name;
	char *options;
};
493 /* support mount_bdev() with options */
494 static int erofs_fill_super(struct super_block *sb,
495 void *_priv, int silent)
497 struct erofs_mount_private *priv = _priv;
499 return erofs_read_super(sb, priv->dev_name,
500 priv->options, silent);
503 static struct dentry *erofs_mount(
504 struct file_system_type *fs_type, int flags,
505 const char *dev_name, void *data)
507 struct erofs_mount_private priv = {
508 .dev_name = dev_name,
512 return mount_bdev(fs_type, flags, dev_name,
513 &priv, erofs_fill_super);
/* file_system_type->kill_sb: plain block-device superblock teardown */
static void erofs_kill_sb(struct super_block *sb)
{
	kill_block_super(sb);
}
521 static struct shrinker erofs_shrinker_info = {
522 .scan_objects = erofs_shrink_scan,
523 .count_objects = erofs_shrink_count,
524 .seeks = DEFAULT_SEEKS,
527 static struct file_system_type erofs_fs_type = {
528 .owner = THIS_MODULE,
530 .mount = erofs_mount,
531 .kill_sb = erofs_kill_sb,
532 .fs_flags = FS_REQUIRES_DEV,
534 MODULE_ALIAS_FS("erofs");
#ifdef CONFIG_EROFS_FS_ZIP
/* compression subsystem entry points, defined in the unzip code */
extern int z_erofs_init_zip_subsystem(void);
extern void z_erofs_exit_zip_subsystem(void);
#endif
541 static int __init erofs_module_init(void)
545 erofs_check_ondisk_layout_definitions();
546 infoln("initializing erofs " EROFS_VERSION);
548 err = erofs_init_inode_cache();
552 err = register_shrinker(&erofs_shrinker_info);
556 #ifdef CONFIG_EROFS_FS_ZIP
557 err = z_erofs_init_zip_subsystem();
562 err = register_filesystem(&erofs_fs_type);
566 infoln("successfully to initialize erofs");
570 #ifdef CONFIG_EROFS_FS_ZIP
571 z_erofs_exit_zip_subsystem();
574 unregister_shrinker(&erofs_shrinker_info);
576 erofs_exit_inode_cache();
581 static void __exit erofs_module_exit(void)
583 unregister_filesystem(&erofs_fs_type);
584 #ifdef CONFIG_EROFS_FS_ZIP
585 z_erofs_exit_zip_subsystem();
587 unregister_shrinker(&erofs_shrinker_info);
588 erofs_exit_inode_cache();
589 infoln("successfully finalize erofs");
592 /* get filesystem statistics */
593 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
595 struct super_block *sb = dentry->d_sb;
596 struct erofs_sb_info *sbi = EROFS_SB(sb);
597 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
599 buf->f_type = sb->s_magic;
600 buf->f_bsize = EROFS_BLKSIZ;
601 buf->f_blocks = sbi->blocks;
602 buf->f_bfree = buf->f_bavail = 0;
604 buf->f_files = ULLONG_MAX;
605 buf->f_ffree = ULLONG_MAX - sbi->inos;
607 buf->f_namelen = EROFS_NAME_LEN;
609 buf->f_fsid.val[0] = (u32)id;
610 buf->f_fsid.val[1] = (u32)(id >> 32);
614 static int erofs_show_options(struct seq_file *seq, struct dentry *root)
616 struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
618 #ifdef CONFIG_EROFS_FS_XATTR
619 if (test_opt(sbi, XATTR_USER))
620 seq_puts(seq, ",user_xattr");
622 seq_puts(seq, ",nouser_xattr");
624 #ifdef CONFIG_EROFS_FS_POSIX_ACL
625 if (test_opt(sbi, POSIX_ACL))
626 seq_puts(seq, ",acl");
628 seq_puts(seq, ",noacl");
630 #ifdef CONFIG_EROFS_FAULT_INJECTION
631 if (test_opt(sbi, FAULT_INJECTION))
632 seq_printf(seq, ",fault_injection=%u",
633 sbi->fault_info.inject_rate);
638 static int erofs_remount(struct super_block *sb, int *flags, char *data)
640 DBG_BUGON(!sb_rdonly(sb));
646 const struct super_operations erofs_sops = {
647 .put_super = erofs_put_super,
648 .alloc_inode = alloc_inode,
649 .destroy_inode = destroy_inode,
650 .statfs = erofs_statfs,
651 .show_options = erofs_show_options,
652 .remount_fs = erofs_remount,
655 module_init(erofs_module_init);
656 module_exit(erofs_module_exit);
658 MODULE_DESCRIPTION("Enhanced ROM File System");
659 MODULE_AUTHOR("Gao Xiang, Yu Chao, Miao Xie, CONSUMER BG, HUAWEI Inc.");
660 MODULE_LICENSE("GPL");