// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
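
/*
 * Example (illustrative, not part of the original source): these fault
 * attributes are normally driven by the fault_injection= and fault_type=
 * mount options.  A sketch of a direct call that would inject roughly one
 * kmalloc failure per 1000 tracked allocations, assuming FAULT_KMALLOC's
 * bit position in the inject_type mask:
 *
 *	f2fs_build_fault_attr(sbi, 1000, 1 << FAULT_KMALLOC);
 *
 * Passing rate = 0 and type = 0 clears the whole f2fs_fault_info.
 */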

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
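
/*
 * Note (illustrative): in the original file this shrinker is registered
 * once at module init, roughly:
 *
 *	err = register_shrinker(&f2fs_shrinker_info);
 *
 * so count_objects/scan_objects can reclaim from every mounted f2fs
 * instance tracked on the global shrinker list.
 */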

enum {
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_err, NULL},
};
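
/*
 * Example (illustrative): a mount command exercising several of the
 * tokens above; match_token() pairs each comma-separated option with
 * its pattern and fills in the %s/%u arguments:
 *
 *	mount -t f2fs -o background_gc=sync,discard,active_logs=6 \
 *		/dev/sdb1 /mnt/f2fs
 */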

void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}
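
/*
 * Note (illustrative): callers normally go through wrapper macros defined
 * in f2fs.h that prepend a printk level, along the lines of:
 *
 *	#define f2fs_err(sbi, fmt, ...)					\
 *		f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
 *
 * so the f2fs_err()/f2fs_warn()/f2fs_info() calls used throughout this
 * file all funnel into f2fs_printk() with the right severity.
 */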

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
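
/*
 * Worked example (illustrative): with user_block_count = 1,000,000 and
 * reserved_blocks = 0, the cap is min(1,000,000 >> 3, 1,000,000), i.e.
 * 125,000 blocks (the 12.5% ceiling noted above), so a mount with
 * reserve_root=200000 would be trimmed down to 125,000.
 */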

static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in a section in the worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in the
	 * worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
			 wanted_reserved_segments);

	return 0;
}
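
/*
 * Worked example (illustrative, made-up geometry): with 512 blocks per
 * segment, one segment per section and io_bits=7 (F2FS_IO_SIZE = 128
 * blocks), avg_vblocks = 512 / 128 = 4; with, say, 6 reserved segments
 * the function wants (128 / 4) * 6 - 6 = 186 additional reserved
 * segments, provided enough user blocks remain available.
 */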

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}
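
/*
 * Worked example (illustrative): mounting with checkpoint=disable:30%
 * on a 1,000,000-block filesystem yields
 * unusable_cap = (1,000,000 / 100) * 30 = 300,000 blocks, the most
 * space that may become unusable before checkpointing is forced back on.
 */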

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif

static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}
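
/*
 * Example (illustrative): for exercising fscrypt paths without real
 * keys, an image formatted with the encrypt feature can be mounted as:
 *
 *	mount -t f2fs -o test_dummy_encryption=v2 /dev/sdb1 /mnt/f2fs
 *
 * which causes new files and directories created in unencrypted
 * directories to use a dummy v2 encryption policy.
 */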

static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option requires a read-only mount */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "user-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (!strcmp(name, "fs-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
			} else if (!strcmp(name, "lz4")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
			} else if (!strcmp(name, "zstd")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
			} else if (!strcmp(name, "lzo-rle")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
		return -EINVAL;
	}

	/*
	 * Don't pass down write hints if the number of active logs is less
	 * than NR_CURSEG_PERSIST_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
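
/*
 * Example (illustrative): parse_options() applied to the string
 * "background_gc=off,reserve_root=1024,compress_algorithm=zstd" leaves
 * bggc_mode = BGGC_MODE_OFF, root_reserved_blocks = 1024 with
 * RESERVE_ROOT set, and compress_algorithm = COMPRESS_ZSTD, assuming
 * the image enables the compression feature.
 */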

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode being called simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* fi->extent_tree should be kept for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
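
/*
 * Note (illustrative): the return value follows the VFS drop_inode
 * contract, where nonzero means "evict this inode now" and zero keeps
 * it cached; f2fs returns 1 early for meta/node inodes once
 * checkpointing is disabled so their dirty pages are simply dropped.
 */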

int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid races */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback pages */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		down_write(&sbi->gc_lock);
		err = f2fs_write_checkpoint(sbi, &cpc);
		up_write(&sbi->gc_lock);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
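
/*
 * Worked example (illustrative): a project with a 1 GiB block soft
 * limit on a 4 KiB-blocksize filesystem caps f_blocks at
 * (1 << 30) >> 12 = 262,144 blocks, so df on a directory inheriting
 * that project ID reports the quota limit rather than the whole
 * filesystem size.
 */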

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

static inline void f2fs_show_compress_options(struct seq_file *seq,
					      struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif
	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");
	return 0;
}
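
/*
 * Example (illustrative): with mostly default options, the resulting
 * /proc/mounts entry looks roughly like:
 *
 *	/dev/sdb1 /mnt/f2fs f2fs rw,lazytime,background_gc=on,discard,
 *	no_heap,user_xattr,inline_xattr,acl,inline_data,inline_dentry,
 *	flush_merge,extent_cache,mode=adaptive,active_logs=8,
 *	alloc_mode=default,fsync_mode=posix 0 0
 */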

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
	F2FS_OPTION(sbi).compress_ext_cnt = 0;
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;

	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
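
/*
 * Note (illustrative): default_options() is invoked both at mount time
 * and at the start of f2fs_remount(), so a remount that names no
 * options resets everything above before the new option string is
 * parsed.
 */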

#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		down_write(&sbi->gc_lock);
		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	up_write(&sbi->gc_lock);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Both the previous and new state of the filesystem are RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enabling atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_gc;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

	/*
	 * We stop issuing the flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount options.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
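
/*
 * Example (illustrative): a typical remount that toggles background GC
 * off on a live mount, which lands in the GC-thread branch above:
 *
 *	mount -o remount,background_gc=off /mnt/f2fs
 */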

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		memcpy_from_page(data, page, offset, tocopy);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		memcpy_to_page(page, offset, data, tocopy);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}

static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}

static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}

static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *   f2fs_quota_sync
		 *     down_read(quota_sem)
		 *     dquot_writeback_dquots()
		 *       f2fs_dquot_commit
		 *         block_operation
		 *         down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shut down journalled quota; subsequent updates would
	 * then leave quota records inconsistent with fs data, so tag the
	 * flag to let fsck be aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);

	return err;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			if (ret)
				f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
					 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause a NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}

static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}

static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret = dquot_release(dquot);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_commit_info(sb, type);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
2490 static const struct dquot_operations f2fs_quota_operations = {
2491 .get_reserved_space = f2fs_get_reserved_space,
2492 .write_dquot = f2fs_dquot_commit,
2493 .acquire_dquot = f2fs_dquot_acquire,
2494 .release_dquot = f2fs_dquot_release,
2495 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2496 .write_info = f2fs_dquot_commit_info,
2497 .alloc_dquot = dquot_alloc,
2498 .destroy_dquot = dquot_destroy,
2499 .get_projid = f2fs_get_projid,
2500 .get_next_id = dquot_get_next_id,
2503 static const struct quotactl_ops f2fs_quotactl_ops = {
2504 .quota_on = f2fs_quota_on,
2505 .quota_off = f2fs_quota_off,
2506 .quota_sync = f2fs_quota_sync,
2507 .get_state = dquot_get_state,
2508 .set_info = dquot_set_dqinfo,
2509 .get_dqblk = dquot_get_dqblk,
2510 .set_dqblk = dquot_set_dqblk,
2511 .get_nextdqblk = dquot_get_next_dqblk,
2512 };
2513 #else
2514 int f2fs_quota_sync(struct super_block *sb, int type)
2516 return 0;
2519 void f2fs_quota_off_umount(struct super_block *sb)
2522 #endif
2524 static const struct super_operations f2fs_sops = {
2525 .alloc_inode = f2fs_alloc_inode,
2526 .free_inode = f2fs_free_inode,
2527 .drop_inode = f2fs_drop_inode,
2528 .write_inode = f2fs_write_inode,
2529 .dirty_inode = f2fs_dirty_inode,
2530 .show_options = f2fs_show_options,
2531 #ifdef CONFIG_QUOTA
2532 .quota_read = f2fs_quota_read,
2533 .quota_write = f2fs_quota_write,
2534 .get_dquots = f2fs_get_dquots,
2535 #endif
2536 .evict_inode = f2fs_evict_inode,
2537 .put_super = f2fs_put_super,
2538 .sync_fs = f2fs_sync_fs,
2539 .freeze_fs = f2fs_freeze,
2540 .unfreeze_fs = f2fs_unfreeze,
2541 .statfs = f2fs_statfs,
2542 .remount_fs = f2fs_remount,
2545 #ifdef CONFIG_FS_ENCRYPTION
2546 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2548 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2549 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2553 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2556 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2559 * Encrypting the root directory is not allowed because fsck
2560 * expects the lost+found directory to exist and remain unencrypted
2561 * if the LOST_FOUND feature is enabled.
2564 if (f2fs_sb_has_lost_found(sbi) &&
2565 inode->i_ino == F2FS_ROOT_INO(sbi))
2566 return -EPERM;
2568 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2569 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2570 ctx, len, fs_data, XATTR_CREATE);
2573 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
2575 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
2578 static bool f2fs_has_stable_inodes(struct super_block *sb)
2583 static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
2584 int *ino_bits_ret, int *lblk_bits_ret)
2586 *ino_bits_ret = 8 * sizeof(nid_t);
2587 *lblk_bits_ret = 8 * sizeof(block_t);
2590 static int f2fs_get_num_devices(struct super_block *sb)
2592 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2594 if (f2fs_is_multi_device(sbi))
2595 return sbi->s_ndevs;
2597 return 1;
2599 static void f2fs_get_devices(struct super_block *sb,
2600 struct request_queue **devs)
2602 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2605 for (i = 0; i < sbi->s_ndevs; i++)
2606 devs[i] = bdev_get_queue(FDEV(i).bdev);
2609 static const struct fscrypt_operations f2fs_cryptops = {
2610 .key_prefix = "f2fs:",
2611 .get_context = f2fs_get_context,
2612 .set_context = f2fs_set_context,
2613 .get_dummy_policy = f2fs_get_dummy_policy,
2614 .empty_dir = f2fs_empty_dir,
2615 .max_namelen = F2FS_NAME_LEN,
2616 .has_stable_inodes = f2fs_has_stable_inodes,
2617 .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
2618 .get_num_devices = f2fs_get_num_devices,
2619 .get_devices = f2fs_get_devices,
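/*
 * These hooks let fscrypt store the encryption context in an f2fs xattr
 * and, on multi-device setups, enumerate every underlying request queue
 * so inline-encryption capabilities can be checked per device.
 */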
2623 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2624 u64 ino, u32 generation)
2626 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2627 struct inode *inode;
2629 if (f2fs_check_nid_range(sbi, ino))
2630 return ERR_PTR(-ESTALE);
2633 * f2fs_iget isn't quite right if the inode is currently unallocated!
2634 * However f2fs_iget currently does appropriate checks to handle stale
2635 * inodes so everything is OK.
2637 inode = f2fs_iget(sb, ino);
2638 if (IS_ERR(inode))
2639 return ERR_CAST(inode);
2640 if (unlikely(generation && inode->i_generation != generation)) {
2641 /* we didn't find the right inode.. */
2642 iput(inode);
2643 return ERR_PTR(-ESTALE);
2648 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2649 int fh_len, int fh_type)
2651 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
2652 f2fs_nfs_get_inode);
2655 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
2656 int fh_len, int fh_type)
2658 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
2659 f2fs_nfs_get_inode);
2662 static const struct export_operations f2fs_export_ops = {
2663 .fh_to_dentry = f2fs_fh_to_dentry,
2664 .fh_to_parent = f2fs_fh_to_parent,
2665 .get_parent = f2fs_get_parent,
2668 static loff_t max_file_blocks(void)
2670 loff_t result = 0;
2671 loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
2674 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
2675 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
2676 * space in inode.i_addr, so it is safer to reassign
2677 * result as zero.
2680 /* two direct node blocks */
2681 result += (leaf_count * 2);
2683 /* two indirect node blocks */
2684 leaf_count *= NIDS_PER_BLOCK;
2685 result += (leaf_count * 2);
2687 /* one double indirect node block */
2688 leaf_count *= NIDS_PER_BLOCK;
2689 result += leaf_count;
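/*
 * Illustrative arithmetic (assuming the common 4KB-block geometry where
 * DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *   2 * 1018        blocks via the two direct nodes
 * + 2 * 1018^2      blocks via the two indirect nodes
 * + 1018^3          blocks via the double indirect node
 * ~= 1.06e9 blocks, i.e. roughly 3.9 TiB of file data.
 */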
2694 static int __f2fs_commit_super(struct buffer_head *bh,
2695 struct f2fs_super_block *super)
2699 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2700 set_buffer_dirty(bh);
2703 /* it's a rare case, so we can do FUA all the time */
2704 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2707 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2708 struct buffer_head *bh)
2710 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2711 (bh->b_data + F2FS_SUPER_OFFSET);
2712 struct super_block *sb = sbi->sb;
2713 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2714 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2715 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2716 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2717 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2718 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2719 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2720 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2721 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2722 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2723 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2724 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2725 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2726 u64 main_end_blkaddr = main_blkaddr +
2727 (segment_count_main << log_blocks_per_seg);
2728 u64 seg_end_blkaddr = segment0_blkaddr +
2729 (segment_count << log_blocks_per_seg);
2731 if (segment0_blkaddr != cp_blkaddr) {
2732 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2733 segment0_blkaddr, cp_blkaddr);
2737 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2738 sit_blkaddr) {
2739 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2740 cp_blkaddr, sit_blkaddr,
2741 segment_count_ckpt << log_blocks_per_seg);
2745 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2746 nat_blkaddr) {
2747 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2748 sit_blkaddr, nat_blkaddr,
2749 segment_count_sit << log_blocks_per_seg);
2753 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2754 ssa_blkaddr) {
2755 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2756 nat_blkaddr, ssa_blkaddr,
2757 segment_count_nat << log_blocks_per_seg);
2761 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2762 main_blkaddr) {
2763 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2764 ssa_blkaddr, main_blkaddr,
2765 segment_count_ssa << log_blocks_per_seg);
2769 if (main_end_blkaddr > seg_end_blkaddr) {
2770 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
2771 main_blkaddr, seg_end_blkaddr,
2772 segment_count_main << log_blocks_per_seg);
2774 } else if (main_end_blkaddr < seg_end_blkaddr) {
2775 int err = 0;
2776 char *res;
2778 /* fix in-memory information all the time */
2779 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
2780 segment0_blkaddr) >> log_blocks_per_seg);
2782 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2783 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2784 res = "internally";
2785 } else {
2786 err = __f2fs_commit_super(bh, NULL);
2787 res = err ? "failed" : "done";
2789 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
2790 res, main_blkaddr, seg_end_blkaddr,
2791 segment_count_main << log_blocks_per_seg);
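/*
 * The checks above pin down the fixed metadata layout: each area must
 * begin exactly where the previous one ends,
 *
 *   segment 0 (superblocks) | CP | SIT | NAT | SSA | MAIN
 *
 * and MAIN may only end at or before the last segment counted by
 * segment_count (the undersized case is repaired in place above).
 */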
2798 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2799 struct buffer_head *bh)
2801 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
2802 block_t total_sections, blocks_per_seg;
2803 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2804 (bh->b_data + F2FS_SUPER_OFFSET);
2805 size_t crc_offset = 0;
2806 __u32 crc = 0;
2808 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
2809 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2810 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2814 /* Check checksum_offset and crc in superblock */
2815 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2816 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2817 if (crc_offset !=
2818 offsetof(struct f2fs_super_block, crc)) {
2819 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2821 return -EFSCORRUPTED;
2823 crc = le32_to_cpu(raw_super->crc);
2824 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2825 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2826 return -EFSCORRUPTED;
2830 /* Currently, support only 4KB page cache size */
2831 if (F2FS_BLKSIZE != PAGE_SIZE) {
2832 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2834 return -EFSCORRUPTED;
2837 /* Currently, support only 4KB block size */
2838 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
2839 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
2840 le32_to_cpu(raw_super->log_blocksize),
2841 F2FS_BLKSIZE_BITS);
2842 return -EFSCORRUPTED;
2845 /* check log blocks per segment */
2846 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2847 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2848 le32_to_cpu(raw_super->log_blocks_per_seg));
2849 return -EFSCORRUPTED;
2852 /* Currently, support 512/1024/2048/4096 bytes sector size */
2853 if (le32_to_cpu(raw_super->log_sectorsize) >
2854 F2FS_MAX_LOG_SECTOR_SIZE ||
2855 le32_to_cpu(raw_super->log_sectorsize) <
2856 F2FS_MIN_LOG_SECTOR_SIZE) {
2857 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2858 le32_to_cpu(raw_super->log_sectorsize));
2859 return -EFSCORRUPTED;
2861 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2862 le32_to_cpu(raw_super->log_sectorsize) !=
2863 F2FS_MAX_LOG_SECTOR_SIZE) {
2864 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2865 le32_to_cpu(raw_super->log_sectors_per_block),
2866 le32_to_cpu(raw_super->log_sectorsize));
2867 return -EFSCORRUPTED;
2870 segment_count = le32_to_cpu(raw_super->segment_count);
2871 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2872 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2873 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2874 total_sections = le32_to_cpu(raw_super->section_count);
2876 /* blocks_per_seg should be 512, given the above check */
2877 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
2879 if (segment_count > F2FS_MAX_SEGMENT ||
2880 segment_count < F2FS_MIN_SEGMENTS) {
2881 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2882 return -EFSCORRUPTED;
2885 if (total_sections > segment_count_main || total_sections < 1 ||
2886 segs_per_sec > segment_count || !segs_per_sec) {
2887 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2888 segment_count, total_sections, segs_per_sec);
2889 return -EFSCORRUPTED;
2892 if (segment_count_main != total_sections * segs_per_sec) {
2893 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
2894 segment_count_main, total_sections, segs_per_sec);
2895 return -EFSCORRUPTED;
2898 if ((segment_count / segs_per_sec) < total_sections) {
2899 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2900 segment_count, segs_per_sec, total_sections);
2901 return -EFSCORRUPTED;
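/*
 * In the next check, ">> 9" converts raw block_count into a segment
 * count, since log_blocks_per_seg was pinned to 9 (512 blocks per
 * segment) earlier in this function.
 */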
2904 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2905 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2906 segment_count, le64_to_cpu(raw_super->block_count));
2907 return -EFSCORRUPTED;
2910 if (RDEV(0).path[0]) {
2911 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
2912 int i = 1;
2914 while (i < MAX_DEVICES && RDEV(i).path[0]) {
2915 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
2916 i++;
2918 if (segment_count != dev_seg_count) {
2919 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
2920 segment_count, dev_seg_count);
2921 return -EFSCORRUPTED;
2924 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
2925 !bdev_is_zoned(sbi->sb->s_bdev)) {
2926 f2fs_info(sbi, "Zoned block device path is missing");
2927 return -EFSCORRUPTED;
2931 if (secs_per_zone > total_sections || !secs_per_zone) {
2932 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2933 secs_per_zone, total_sections);
2934 return -EFSCORRUPTED;
2936 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2937 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2938 (le32_to_cpu(raw_super->extension_count) +
2939 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
2940 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2941 le32_to_cpu(raw_super->extension_count),
2942 raw_super->hot_ext_count,
2943 F2FS_MAX_EXTENSION);
2944 return -EFSCORRUPTED;
2947 if (le32_to_cpu(raw_super->cp_payload) >=
2948 (blocks_per_seg - F2FS_CP_PACKS -
2949 NR_CURSEG_PERSIST_TYPE)) {
2950 f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
2951 le32_to_cpu(raw_super->cp_payload),
2952 blocks_per_seg - F2FS_CP_PACKS -
2953 NR_CURSEG_PERSIST_TYPE);
2954 return -EFSCORRUPTED;
2957 /* check reserved ino info */
2958 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2959 le32_to_cpu(raw_super->meta_ino) != 2 ||
2960 le32_to_cpu(raw_super->root_ino) != 3) {
2961 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2962 le32_to_cpu(raw_super->node_ino),
2963 le32_to_cpu(raw_super->meta_ino),
2964 le32_to_cpu(raw_super->root_ino));
2965 return -EFSCORRUPTED;
2968 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2969 if (sanity_check_area_boundary(sbi, bh))
2970 return -EFSCORRUPTED;
2975 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2977 unsigned int total, fsmeta;
2978 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2979 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2980 unsigned int ovp_segments, reserved_segments;
2981 unsigned int main_segs, blocks_per_seg;
2982 unsigned int sit_segs, nat_segs;
2983 unsigned int sit_bitmap_size, nat_bitmap_size;
2984 unsigned int log_blocks_per_seg;
2985 unsigned int segment_count_main;
2986 unsigned int cp_pack_start_sum, cp_payload;
2987 block_t user_block_count, valid_user_blocks;
2988 block_t avail_node_count, valid_node_count;
2989 unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
2990 int i, j;
2992 total = le32_to_cpu(raw_super->segment_count);
2993 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2994 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2995 fsmeta += sit_segs;
2996 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2997 fsmeta += nat_segs;
2998 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2999 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3001 if (unlikely(fsmeta >= total))
3002 return 1;
3004 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3005 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3007 if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3008 ovp_segments == 0 || reserved_segments == 0)) {
3009 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3013 user_block_count = le64_to_cpu(ckpt->user_block_count);
3014 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3015 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3016 if (!user_block_count || user_block_count >=
3017 segment_count_main << log_blocks_per_seg) {
3018 f2fs_err(sbi, "Wrong user_block_count: %u",
3023 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3024 if (valid_user_blocks > user_block_count) {
3025 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3026 valid_user_blocks, user_block_count);
3030 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3031 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3032 if (valid_node_count > avail_node_count) {
3033 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3034 valid_node_count, avail_node_count);
3038 main_segs = le32_to_cpu(raw_super->segment_count_main);
3039 blocks_per_seg = sbi->blocks_per_seg;
3041 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3042 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3043 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3044 return 1;
3045 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3046 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3047 le32_to_cpu(ckpt->cur_node_segno[j])) {
3048 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3050 le32_to_cpu(ckpt->cur_node_segno[i]));
3055 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3056 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3057 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3058 return 1;
3059 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3060 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3061 le32_to_cpu(ckpt->cur_data_segno[j])) {
3062 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3064 le32_to_cpu(ckpt->cur_data_segno[i]));
3069 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3070 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3071 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3072 le32_to_cpu(ckpt->cur_data_segno[j])) {
3073 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3075 le32_to_cpu(ckpt->cur_node_segno[i]));
3081 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3082 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
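/*
 * SIT and NAT each keep two copies of every block and ping-pong
 * between them, so only half of each area is live at a time; one
 * validity bit per live block gives the expected
 * ((segs / 2) << log_blocks_per_seg) / 8 bytes checked below.
 */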
3084 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3085 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3086 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3087 sit_bitmap_size, nat_bitmap_size);
3091 cp_pack_start_sum = __start_sum_addr(sbi);
3092 cp_payload = __cp_payload(sbi);
3093 if (cp_pack_start_sum < cp_payload + 1 ||
3094 cp_pack_start_sum > blocks_per_seg - 1 -
3095 NR_CURSEG_PERSIST_TYPE) {
3096 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3101 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3102 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3103 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3104 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3105 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3106 le32_to_cpu(ckpt->checksum_offset));
3110 nat_blocks = nat_segs << log_blocks_per_seg;
3111 nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3112 nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
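/*
 * nat_bits packs two bitmaps (full and empty NAT blocks) plus an
 * 8-byte checkpoint version, rounded up to whole blocks; together with
 * cp_payload and the current segment summaries it must still fit
 * inside one checkpoint pack, which the check below enforces.
 */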
3113 if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3114 (cp_payload + F2FS_CP_PACKS +
3115 NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3116 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3117 cp_payload, nat_bits_blocks);
3121 if (unlikely(f2fs_cp_error(sbi))) {
3122 f2fs_err(sbi, "A bug case: need to run fsck");
3128 static void init_sb_info(struct f2fs_sb_info *sbi)
3130 struct f2fs_super_block *raw_super = sbi->raw_super;
3131 int i;
3133 sbi->log_sectors_per_block =
3134 le32_to_cpu(raw_super->log_sectors_per_block);
3135 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3136 sbi->blocksize = 1 << sbi->log_blocksize;
3137 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3138 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
3139 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3140 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3141 sbi->total_sections = le32_to_cpu(raw_super->section_count);
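/* NAT keeps two copies of each block, hence the "/ 2" just below */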
3142 sbi->total_node_count =
3143 (le32_to_cpu(raw_super->segment_count_nat) / 2)
3144 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3145 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
3146 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
3147 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
3148 sbi->cur_victim_sec = NULL_SECNO;
3149 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3150 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3151 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3152 sbi->migration_granularity = sbi->segs_per_sec;
3154 sbi->dir_level = DEF_DIR_LEVEL;
3155 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3156 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3157 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3158 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3159 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3160 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3161 DEF_UMOUNT_DISCARD_TIMEOUT;
3162 clear_sbi_flag(sbi, SBI_NEED_FSCK);
3164 for (i = 0; i < NR_COUNT_TYPE; i++)
3165 atomic_set(&sbi->nr_pages[i], 0);
3167 for (i = 0; i < META; i++)
3168 atomic_set(&sbi->wb_sync_req[i], 0);
3170 INIT_LIST_HEAD(&sbi->s_list);
3171 mutex_init(&sbi->umount_mutex);
3172 init_rwsem(&sbi->io_order_lock);
3173 spin_lock_init(&sbi->cp_lock);
3175 sbi->dirty_device = 0;
3176 spin_lock_init(&sbi->dev_lock);
3178 init_rwsem(&sbi->sb_lock);
3179 init_rwsem(&sbi->pin_sem);
3182 static int init_percpu_info(struct f2fs_sb_info *sbi)
3184 int err;
3186 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3187 if (err)
3188 return err;
3190 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3191 GFP_KERNEL);
3192 if (err)
3193 percpu_counter_destroy(&sbi->alloc_valid_block_count);
3195 return err;
3198 #ifdef CONFIG_BLK_DEV_ZONED
3200 struct f2fs_report_zones_args {
3201 struct f2fs_sb_info *sbi;
3202 struct f2fs_dev_info *dev;
3205 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3206 void *data)
3208 struct f2fs_report_zones_args *rz_args = data;
3209 block_t unusable_blocks = (zone->len - zone->capacity) >>
3210 F2FS_LOG_SECTORS_PER_BLOCK;
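/*
 * On devices with zone capacity < zone size (e.g. ZNS SSDs), the tail
 * of each sequential zone is unusable; f2fs requires that gap to be
 * the same for every zone, which the checks below enforce.
 */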
3212 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3213 return 0;
3215 set_bit(idx, rz_args->dev->blkz_seq);
3216 if (!rz_args->sbi->unusable_blocks_per_sec) {
3217 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
3218 return 0;
3219 }
3220 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
3221 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity");
3222 return -EINVAL;
3224 return 0;
3227 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3229 struct block_device *bdev = FDEV(devi).bdev;
3230 sector_t nr_sectors = bdev->bd_part->nr_sects;
3231 struct f2fs_report_zones_args rep_zone_arg;
3232 int ret;
3234 if (!f2fs_sb_has_blkzoned(sbi))
3235 return 0;
3237 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3238 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
3239 return -EINVAL;
3240 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
3241 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
3242 __ilog2_u32(sbi->blocks_per_blkz))
3243 return -EINVAL;
3244 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
3245 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
3246 sbi->log_blocks_per_blkz;
3247 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
3248 FDEV(devi).nr_blkz++;
3250 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3251 BITS_TO_LONGS(FDEV(devi).nr_blkz)
3252 * sizeof(unsigned long),
3253 GFP_KERNEL);
3254 if (!FDEV(devi).blkz_seq)
3255 return -ENOMEM;
3257 rep_zone_arg.sbi = sbi;
3258 rep_zone_arg.dev = &FDEV(devi);
3260 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3261 &rep_zone_arg);
3263 if (ret < 0)
3264 return ret;
3265 return 0;
3267 #endif
3269 * Read the f2fs raw super block.
3270 * Because we keep two copies of the super block, read both of them
3271 * to get the first valid one. If either of them is broken, we pass
3272 * the recovery flag back to the caller.
3274 static int read_raw_super_block(struct f2fs_sb_info *sbi,
3275 struct f2fs_super_block **raw_super,
3276 int *valid_super_block, int *recovery)
3278 struct super_block *sb = sbi->sb;
3279 int block;
3280 struct buffer_head *bh;
3281 struct f2fs_super_block *super;
3282 int err = 0;
3284 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3285 if (!super)
3286 return -ENOMEM;
3288 for (block = 0; block < 2; block++) {
3289 bh = sb_bread(sb, block);
3290 if (!bh) {
3291 f2fs_err(sbi, "Unable to read %dth superblock",
3298 /* sanity checking of raw super */
3299 err = sanity_check_raw_super(sbi, bh);
3300 if (err) {
3301 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3309 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3310 sizeof(*super));
3311 *valid_super_block = block;
3312 *raw_super = super;
3317 /* No valid superblock */
3318 if (!*raw_super)
3319 kfree(super);
3320 else
3321 err = 0;
3323 return err;
3326 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3328 struct buffer_head *bh;
3329 __u32 crc = 0;
3330 int err;
3332 if ((recover && f2fs_readonly(sbi->sb)) ||
3333 bdev_read_only(sbi->sb->s_bdev)) {
3334 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3338 /* we should update superblock crc here */
3339 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3340 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3341 offsetof(struct f2fs_super_block, crc));
3342 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3345 /* write back-up superblock first */
3346 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3347 if (!bh)
3348 return -EIO;
3349 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3350 brelse(bh);
3352 /* if we are in recovery path, skip writing valid superblock */
3353 if (recover || err)
3354 return err;
3356 /* write current valid superblock */
3357 bh = sb_bread(sbi->sb, sbi->valid_super_block);
3358 if (!bh)
3359 return -EIO;
3360 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3361 brelse(bh);
3363 return err;
3365 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3367 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3368 unsigned int max_devices = MAX_DEVICES;
3369 int i;
3371 /* Initialize single device information */
3372 if (!RDEV(0).path[0]) {
3373 if (!bdev_is_zoned(sbi->sb->s_bdev))
3374 return 0;
3375 max_devices = 1;
3379 * Initialize multiple devices information, or single
3380 * zoned block device information.
3382 sbi->devs = f2fs_kzalloc(sbi,
3383 array_size(max_devices,
3384 sizeof(struct f2fs_dev_info)),
3385 GFP_KERNEL);
3386 if (!sbi->devs)
3387 return -ENOMEM;
3389 for (i = 0; i < max_devices; i++) {
3391 if (i > 0 && !RDEV(i).path[0])
3392 break;
3394 if (max_devices == 1) {
3395 /* Single zoned block device mount */
3396 FDEV(0).bdev =
3397 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3398 sbi->sb->s_mode, sbi->sb->s_type);
3400 /* Multi-device mount */
3401 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3402 FDEV(i).total_segments =
3403 le32_to_cpu(RDEV(i).total_segments);
3404 if (i == 0) {
3405 FDEV(i).start_blk = 0;
3406 FDEV(i).end_blk = FDEV(i).start_blk +
3407 (FDEV(i).total_segments <<
3408 sbi->log_blocks_per_seg) - 1 +
3409 le32_to_cpu(raw_super->segment0_blkaddr);
3410 } else {
3411 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3412 FDEV(i).end_blk = FDEV(i).start_blk +
3413 (FDEV(i).total_segments <<
3414 sbi->log_blocks_per_seg) - 1;
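/*
 * Devices are stitched into one contiguous block address space: the
 * first device starts at segment0_blkaddr and every later device
 * starts right after the previous one ends.
 */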
3416 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3417 sbi->sb->s_mode, sbi->sb->s_type);
3419 if (IS_ERR(FDEV(i).bdev))
3420 return PTR_ERR(FDEV(i).bdev);
3422 /* to release errored devices */
3423 sbi->s_ndevs = i + 1;
3425 #ifdef CONFIG_BLK_DEV_ZONED
3426 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3427 !f2fs_sb_has_blkzoned(sbi)) {
3428 f2fs_err(sbi, "Zoned block device feature not enabled\n");
3431 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3432 if (init_blkz_info(sbi, i)) {
3433 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3436 if (max_devices == 1)
3438 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3440 FDEV(i).total_segments,
3441 FDEV(i).start_blk, FDEV(i).end_blk,
3442 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3443 "Host-aware" : "Host-managed");
3447 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3449 FDEV(i).total_segments,
3450 FDEV(i).start_blk, FDEV(i).end_blk);
3452 f2fs_info(sbi,
3453 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3457 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3459 #ifdef CONFIG_UNICODE
3460 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
3461 const struct f2fs_sb_encodings *encoding_info;
3462 struct unicode_map *encoding;
3463 __u16 encoding_flags;
3465 if (f2fs_sb_has_encrypt(sbi)) {
3466 f2fs_err(sbi,
3467 "Can't mount with encoding and encryption");
3471 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
3472 &encoding_flags)) {
3473 f2fs_err(sbi,
3474 "Encoding requested by superblock is unknown");
3478 encoding = utf8_load(encoding_info->version);
3479 if (IS_ERR(encoding)) {
3481 "can't mount with superblock charset: %s-%s "
3482 "not supported by the kernel. flags: 0x%x.",
3483 encoding_info->name, encoding_info->version,
3485 return PTR_ERR(encoding);
3487 f2fs_info(sbi, "Using encoding defined by superblock: "
3488 "%s-%s with flags 0x%hx", encoding_info->name,
3489 encoding_info->version ?: "\b", encoding_flags);
3491 sbi->sb->s_encoding = encoding;
3492 sbi->sb->s_encoding_flags = encoding_flags;
3493 sbi->sb->s_d_op = &f2fs_dentry_ops;
3495 #else
3496 if (f2fs_sb_has_casefold(sbi)) {
3497 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3504 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3506 struct f2fs_sm_info *sm_i = SM_I(sbi);
3508 /* adjust parameters according to the volume size */
3509 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3510 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3511 sm_i->dcc_info->discard_granularity = 1;
3512 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
3515 sbi->readdir_ra = 1;
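/*
 * Rationale (illustrative): on small volumes, reuse-style allocation,
 * forced in-place updates and fine-grained discard keep cleaning
 * overhead low when there are few segments to garbage-collect.
 */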
3518 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3520 struct f2fs_sb_info *sbi;
3521 struct f2fs_super_block *raw_super;
3522 struct inode *root;
3523 int err;
3524 bool skip_recovery = false, need_fsck = false;
3525 char *options = NULL;
3526 int recovery, i, valid_super_block;
3527 struct curseg_info *seg_i;
3528 int retry_cnt = 1;
3530 try_onemore:
3531 err = -EINVAL;
3532 raw_super = NULL;
3533 valid_super_block = -1;
3536 /* allocate memory for f2fs-specific super block info */
3537 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3538 if (!sbi)
3539 return -ENOMEM;
3541 sbi->sb = sb;
3543 /* Load the checksum driver */
3544 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3545 if (IS_ERR(sbi->s_chksum_driver)) {
3546 f2fs_err(sbi, "Cannot load crc32 driver.");
3547 err = PTR_ERR(sbi->s_chksum_driver);
3548 sbi->s_chksum_driver = NULL;
3549 goto free_sbi;
3552 /* set a block size */
3553 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3554 f2fs_err(sbi, "unable to set blocksize");
3558 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3559 &recovery);
3560 if (err)
3561 goto free_sbi;
3563 sb->s_fs_info = sbi;
3564 sbi->raw_super = raw_super;
3566 /* precompute checksum seed for metadata */
3567 if (f2fs_sb_has_inode_chksum(sbi))
3568 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3569 sizeof(raw_super->uuid));
3571 default_options(sbi);
3572 /* parse mount options */
3573 options = kstrdup((const char *)data, GFP_KERNEL);
3574 if (data && !options) {
3575 err = -ENOMEM;
3576 goto free_sb_buf;
3579 err = parse_options(sb, options, false);
3580 if (err)
3581 goto free_options;
3583 sbi->max_file_blocks = max_file_blocks();
3584 sb->s_maxbytes = sbi->max_file_blocks <<
3585 le32_to_cpu(raw_super->log_blocksize);
3586 sb->s_max_links = F2FS_LINK_MAX;
3588 err = f2fs_setup_casefold(sbi);
3589 if (err)
3590 goto free_options;
3592 #ifdef CONFIG_QUOTA
3593 sb->dq_op = &f2fs_quota_operations;
3594 sb->s_qcop = &f2fs_quotactl_ops;
3595 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3597 if (f2fs_sb_has_quota_ino(sbi)) {
3598 for (i = 0; i < MAXQUOTAS; i++) {
3599 if (f2fs_qf_ino(sbi->sb, i))
3600 sbi->nquota_files++;
3603 #endif
3605 sb->s_op = &f2fs_sops;
3606 #ifdef CONFIG_FS_ENCRYPTION
3607 sb->s_cop = &f2fs_cryptops;
3608 #endif
3609 #ifdef CONFIG_FS_VERITY
3610 sb->s_vop = &f2fs_verityops;
3611 #endif
3612 sb->s_xattr = f2fs_xattr_handlers;
3613 sb->s_export_op = &f2fs_export_ops;
3614 sb->s_magic = F2FS_SUPER_MAGIC;
3615 sb->s_time_gran = 1;
3616 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3617 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3618 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3619 sb->s_iflags |= SB_I_CGROUPWB;
3621 /* init f2fs-specific super block info */
3622 sbi->valid_super_block = valid_super_block;
3623 init_rwsem(&sbi->gc_lock);
3624 mutex_init(&sbi->writepages);
3625 mutex_init(&sbi->cp_mutex);
3626 init_rwsem(&sbi->node_write);
3627 init_rwsem(&sbi->node_change);
3629 /* disallow all the data/node/meta page writes */
3630 set_sbi_flag(sbi, SBI_POR_DOING);
3631 spin_lock_init(&sbi->stat_lock);
3633 /* init iostat info */
3634 spin_lock_init(&sbi->iostat_lock);
3635 sbi->iostat_enable = false;
3636 sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
3638 for (i = 0; i < NR_PAGE_TYPE; i++) {
3639 int n = (i == META) ? 1 : NR_TEMP_TYPE;
3640 int j;
3642 sbi->write_io[i] =
3643 f2fs_kmalloc(sbi,
3644 array_size(n,
3645 sizeof(struct f2fs_bio_info)),
3646 GFP_KERNEL);
3647 if (!sbi->write_io[i]) {
3652 for (j = HOT; j < n; j++) {
3653 init_rwsem(&sbi->write_io[i][j].io_rwsem);
3654 sbi->write_io[i][j].sbi = sbi;
3655 sbi->write_io[i][j].bio = NULL;
3656 spin_lock_init(&sbi->write_io[i][j].io_lock);
3657 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
3658 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
3659 init_rwsem(&sbi->write_io[i][j].bio_list_lock);
3663 init_rwsem(&sbi->cp_rwsem);
3664 init_rwsem(&sbi->quota_sem);
3665 init_waitqueue_head(&sbi->cp_wait);
3668 err = init_percpu_info(sbi);
3669 if (err)
3670 goto free_bio_info;
3672 if (F2FS_IO_ALIGNED(sbi)) {
3673 sbi->write_io_dummy =
3674 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
3675 if (!sbi->write_io_dummy) {
3676 err = -ENOMEM;
3677 goto free_percpu;
3681 /* init per sbi slab cache */
3682 err = f2fs_init_xattr_caches(sbi);
3683 if (err)
3684 goto free_io_dummy;
3685 err = f2fs_init_page_array_cache(sbi);
3686 if (err)
3687 goto free_xattr_cache;
3689 /* get an inode for meta space */
3690 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3691 if (IS_ERR(sbi->meta_inode)) {
3692 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3693 err = PTR_ERR(sbi->meta_inode);
3694 goto free_page_array_cache;
3697 err = f2fs_get_valid_checkpoint(sbi);
3698 if (err) {
3699 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3700 goto free_meta_inode;
3703 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
3704 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3705 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
3706 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3707 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
3710 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
3711 set_sbi_flag(sbi, SBI_NEED_FSCK);
3713 /* Initialize device list */
3714 err = f2fs_scan_devices(sbi);
3715 if (err) {
3716 f2fs_err(sbi, "Failed to find devices");
3720 err = f2fs_init_post_read_wq(sbi);
3721 if (err) {
3722 f2fs_err(sbi, "Failed to initialize post read workqueue");
3726 sbi->total_valid_node_count =
3727 le32_to_cpu(sbi->ckpt->valid_node_count);
3728 percpu_counter_set(&sbi->total_valid_inode_count,
3729 le32_to_cpu(sbi->ckpt->valid_inode_count));
3730 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
3731 sbi->total_valid_block_count =
3732 le64_to_cpu(sbi->ckpt->valid_block_count);
3733 sbi->last_valid_block_count = sbi->total_valid_block_count;
3734 sbi->reserved_blocks = 0;
3735 sbi->current_reserved_blocks = 0;
3736 limit_reserve_root(sbi);
3737 adjust_unusable_cap_perc(sbi);
3739 for (i = 0; i < NR_INODE_TYPE; i++) {
3740 INIT_LIST_HEAD(&sbi->inode_list[i]);
3741 spin_lock_init(&sbi->inode_lock[i]);
3743 mutex_init(&sbi->flush_lock);
3745 f2fs_init_extent_cache_info(sbi);
3747 f2fs_init_ino_entry_info(sbi);
3749 f2fs_init_fsync_node_info(sbi);
3751 /* setup f2fs internal modules */
3752 err = f2fs_build_segment_manager(sbi);
3753 if (err) {
3754 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
3758 err = f2fs_build_node_manager(sbi);
3759 if (err) {
3760 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
3765 err = adjust_reserved_segment(sbi);
3766 if (err)
3767 goto free_nm;
3769 /* For write statistics */
3770 if (sb->s_bdev->bd_part)
3771 sbi->sectors_written_start =
3772 (u64)part_stat_read(sb->s_bdev->bd_part,
3773 sectors[STAT_WRITE]);
3775 /* Read accumulated write IO statistics if exists */
3776 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
3777 if (__exist_node_summaries(sbi))
3778 sbi->kbytes_written =
3779 le64_to_cpu(seg_i->journal->info.kbytes_written);
3781 f2fs_build_gc_manager(sbi);
3783 err = f2fs_build_stats(sbi);
3784 if (err)
3785 goto free_nm;
3787 /* get an inode for node space */
3788 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
3789 if (IS_ERR(sbi->node_inode)) {
3790 f2fs_err(sbi, "Failed to read node inode");
3791 err = PTR_ERR(sbi->node_inode);
3792 goto free_stats;
3795 /* read root inode and dentry */
3796 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
3797 if (IS_ERR(root)) {
3798 f2fs_err(sbi, "Failed to read root inode");
3799 err = PTR_ERR(root);
3800 goto free_node_inode;
3802 if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
3803 !root->i_size || !root->i_nlink) {
3804 iput(root);
3805 err = -EINVAL;
3806 goto free_node_inode;
3809 sb->s_root = d_make_root(root); /* allocate root dentry */
3810 if (!sb->s_root) {
3811 err = -ENOMEM;
3812 goto free_node_inode;
3815 err = f2fs_register_sysfs(sbi);
3816 if (err)
3817 goto free_root_inode;
3820 /* Enable quota usage during mount */
3821 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
3822 err = f2fs_enable_quotas(sb);
3823 if (err)
3824 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
3827 /* if there are any orphan inodes, free them */
3828 err = f2fs_recover_orphan_inodes(sbi);
3829 if (err)
3830 goto free_meta;
3832 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
3833 goto reset_checkpoint;
3835 /* recover fsynced data */
3836 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
3837 !test_opt(sbi, NORECOVERY)) {
3839 * the mount should fail when the device is read-only and the
3840 * previous checkpoint was not done by a clean system shutdown.
3842 if (f2fs_hw_is_readonly(sbi)) {
3843 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3845 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3848 f2fs_info(sbi, "write access unavailable, skipping recovery");
3849 goto reset_checkpoint;
3852 if (need_fsck)
3853 set_sbi_flag(sbi, SBI_NEED_FSCK);
3855 if (skip_recovery)
3856 goto reset_checkpoint;
3858 err = f2fs_recover_fsync_data(sbi, false);
3859 if (err < 0) {
3860 if (err != -ENOMEM)
3861 skip_recovery = true;
3862 need_fsck = true;
3863 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
3868 err = f2fs_recover_fsync_data(sbi, true);
3870 if (!f2fs_readonly(sb) && err > 0) {
3871 err = -EINVAL;
3872 f2fs_err(sbi, "Need to recover fsync data");
3878 * If the f2fs is not readonly and fsync data recovery succeeds,
3879 * check zoned block devices' write pointer consistency.
3881 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
3882 err = f2fs_check_write_pointer(sbi);
3883 if (err)
3884 goto free_meta;
3887 reset_checkpoint:
3888 f2fs_init_inmem_curseg(sbi);
3890 /* f2fs_recover_fsync_data() cleared this already */
3891 clear_sbi_flag(sbi, SBI_POR_DOING);
3893 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
3894 err = f2fs_disable_checkpoint(sbi);
3895 if (err)
3896 goto sync_free_meta;
3897 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
3898 f2fs_enable_checkpoint(sbi);
3902 * If the filesystem is not mounted read-only, then
3903 * start the gc_thread.
3905 if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
3906 /* After POR, we can run background GC thread.*/
3907 err = f2fs_start_gc_thread(sbi);
3908 if (err)
3909 goto sync_free_meta;
3913 /* recover broken superblock */
3914 if (recovery) {
3915 err = f2fs_commit_super(sbi, true);
3916 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
3917 sbi->valid_super_block ? 1 : 2, err);
3920 f2fs_join_shrinker(sbi);
3922 f2fs_tuning_parameters(sbi);
3924 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
3925 cur_cp_version(F2FS_CKPT(sbi)));
3926 f2fs_update_time(sbi, CP_TIME);
3927 f2fs_update_time(sbi, REQ_TIME);
3928 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3929 return 0;
3931 sync_free_meta:
3932 /* safe to flush all the data */
3933 sync_filesystem(sbi->sb);
3934 retry_cnt = 0;
3936 free_meta:
3938 f2fs_truncate_quota_inode_pages(sb);
3939 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
3940 f2fs_quota_off_umount(sbi->sb);
3943 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
3944 * when it fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
3945 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
3946 * falls into an infinite loop in f2fs_sync_meta_pages().
3948 truncate_inode_pages_final(META_MAPPING(sbi));
3949 /* evict some inodes being cached by GC */
3950 evict_inodes(sb);
3951 f2fs_unregister_sysfs(sbi);
3952 free_root_inode:
3953 dput(sb->s_root);
3954 sb->s_root = NULL;
3955 free_node_inode:
3956 f2fs_release_ino_entry(sbi, true);
3957 truncate_inode_pages_final(NODE_MAPPING(sbi));
3958 iput(sbi->node_inode);
3959 sbi->node_inode = NULL;
3960 free_stats:
3961 f2fs_destroy_stats(sbi);
3962 free_nm:
3963 f2fs_destroy_node_manager(sbi);
3964 free_sm:
3965 f2fs_destroy_segment_manager(sbi);
3966 f2fs_destroy_post_read_wq(sbi);
3967 free_devices:
3968 destroy_device_list(sbi);
3969 kvfree(sbi->ckpt);
3970 free_meta_inode:
3971 make_bad_inode(sbi->meta_inode);
3972 iput(sbi->meta_inode);
3973 sbi->meta_inode = NULL;
3974 free_page_array_cache:
3975 f2fs_destroy_page_array_cache(sbi);
3976 free_xattr_cache:
3977 f2fs_destroy_xattr_caches(sbi);
3978 free_io_dummy:
3979 mempool_destroy(sbi->write_io_dummy);
3980 free_percpu:
3981 destroy_percpu_info(sbi);
3982 free_bio_info:
3983 for (i = 0; i < NR_PAGE_TYPE; i++)
3984 kvfree(sbi->write_io[i]);
3986 #ifdef CONFIG_UNICODE
3987 utf8_unload(sb->s_encoding);
3988 sb->s_encoding = NULL;
3989 #endif
3990 free_options:
3992 for (i = 0; i < MAXQUOTAS; i++)
3993 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
3995 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
3996 kvfree(options);
3997 free_sb_buf:
3998 kfree(raw_super);
3999 free_sbi:
4000 if (sbi->s_chksum_driver)
4001 crypto_free_shash(sbi->s_chksum_driver);
4002 kfree(sbi);
4004 /* give only one more chance */
4005 if (retry_cnt > 0 && skip_recovery) {
4006 retry_cnt--;
4007 shrink_dcache_sb(sb);
4008 goto try_onemore;
4010 return err;
4013 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4014 const char *dev_name, void *data)
4016 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
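/*
 * Illustrative usage (device and mount point are examples only):
 *
 *   mount -t f2fs -o background_gc=on,discard /dev/sdb1 /mnt/f2fs
 *
 * Options are parsed by parse_options() from the f2fs_tokens table.
 */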
4019 static void kill_f2fs_super(struct super_block *sb)
4021 if (sb->s_root) {
4022 struct f2fs_sb_info *sbi = F2FS_SB(sb);
4024 set_sbi_flag(sbi, SBI_IS_CLOSE);
4025 f2fs_stop_gc_thread(sbi);
4026 f2fs_stop_discard_thread(sbi);
4028 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4029 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4030 struct cp_control cpc = {
4031 .reason = CP_UMOUNT,
4032 };
4033 f2fs_write_checkpoint(sbi, &cpc);
4036 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4037 sb->s_flags &= ~SB_RDONLY;
4039 kill_block_super(sb);
4042 static struct file_system_type f2fs_fs_type = {
4043 .owner = THIS_MODULE,
4044 .name = "f2fs",
4045 .mount = f2fs_mount,
4046 .kill_sb = kill_f2fs_super,
4047 .fs_flags = FS_REQUIRES_DEV,
4049 MODULE_ALIAS_FS("f2fs");
4051 static int __init init_inodecache(void)
4053 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4054 sizeof(struct f2fs_inode_info), 0,
4055 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4056 if (!f2fs_inode_cachep)
4057 return -ENOMEM;
4058 return 0;
4061 static void destroy_inodecache(void)
4064 * Make sure all delayed rcu free inodes are flushed before we
4065 * destroy cache.
4067 rcu_barrier();
4068 kmem_cache_destroy(f2fs_inode_cachep);
4071 static int __init init_f2fs_fs(void)
4075 if (PAGE_SIZE != F2FS_BLKSIZE) {
4076 printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
4077 PAGE_SIZE, F2FS_BLKSIZE);
4078 return -EINVAL;
4081 f2fs_build_trace_ios();
4083 err = init_inodecache();
4084 if (err)
4085 goto fail;
4086 err = f2fs_create_node_manager_caches();
4087 if (err)
4088 goto free_inodecache;
4089 err = f2fs_create_segment_manager_caches();
4090 if (err)
4091 goto free_node_manager_caches;
4092 err = f2fs_create_checkpoint_caches();
4093 if (err)
4094 goto free_segment_manager_caches;
4095 err = f2fs_create_recovery_cache();
4096 if (err)
4097 goto free_checkpoint_caches;
4098 err = f2fs_create_extent_cache();
4099 if (err)
4100 goto free_recovery_cache;
4101 err = f2fs_create_garbage_collection_cache();
4102 if (err)
4103 goto free_extent_cache;
4104 err = f2fs_init_sysfs();
4105 if (err)
4106 goto free_garbage_collection_cache;
4107 err = register_shrinker(&f2fs_shrinker_info);
4108 if (err)
4109 goto free_sysfs;
4110 err = register_filesystem(&f2fs_fs_type);
4111 if (err)
4112 goto free_shrinker;
4113 f2fs_create_root_stats();
4114 err = f2fs_init_post_read_processing();
4115 if (err)
4116 goto free_root_stats;
4117 err = f2fs_init_bio_entry_cache();
4118 if (err)
4119 goto free_post_read;
4120 err = f2fs_init_bioset();
4121 if (err)
4122 goto free_bio_entry_cache;
4123 err = f2fs_init_compress_mempool();
4124 if (err)
4125 goto free_bioset;
4126 err = f2fs_init_compress_cache();
4127 if (err)
4128 goto free_compress_mempool;
4129 return 0;
4130 free_compress_mempool:
4131 f2fs_destroy_compress_mempool();
4132 free_bioset:
4133 f2fs_destroy_bioset();
4134 free_bio_entry_cache:
4135 f2fs_destroy_bio_entry_cache();
4136 free_post_read:
4137 f2fs_destroy_post_read_processing();
4138 free_root_stats:
4139 f2fs_destroy_root_stats();
4140 unregister_filesystem(&f2fs_fs_type);
4141 free_shrinker:
4142 unregister_shrinker(&f2fs_shrinker_info);
4143 free_sysfs:
4144 f2fs_exit_sysfs();
4145 free_garbage_collection_cache:
4146 f2fs_destroy_garbage_collection_cache();
4147 free_extent_cache:
4148 f2fs_destroy_extent_cache();
4149 free_recovery_cache:
4150 f2fs_destroy_recovery_cache();
4151 free_checkpoint_caches:
4152 f2fs_destroy_checkpoint_caches();
4153 free_segment_manager_caches:
4154 f2fs_destroy_segment_manager_caches();
4155 free_node_manager_caches:
4156 f2fs_destroy_node_manager_caches();
4157 free_inodecache:
4158 destroy_inodecache();
4159 fail:
4160 return err;
4163 static void __exit exit_f2fs_fs(void)
4165 f2fs_destroy_compress_cache();
4166 f2fs_destroy_compress_mempool();
4167 f2fs_destroy_bioset();
4168 f2fs_destroy_bio_entry_cache();
4169 f2fs_destroy_post_read_processing();
4170 f2fs_destroy_root_stats();
4171 unregister_filesystem(&f2fs_fs_type);
4172 unregister_shrinker(&f2fs_shrinker_info);
4173 f2fs_exit_sysfs();
4174 f2fs_destroy_garbage_collection_cache();
4175 f2fs_destroy_extent_cache();
4176 f2fs_destroy_recovery_cache();
4177 f2fs_destroy_checkpoint_caches();
4178 f2fs_destroy_segment_manager_caches();
4179 f2fs_destroy_node_manager_caches();
4180 destroy_inodecache();
4181 f2fs_destroy_trace_ios();
4184 module_init(init_f2fs_fs)
4185 module_exit(exit_f2fs_fs)
4187 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4188 MODULE_DESCRIPTION("Flash Friendly File System");
4189 MODULE_LICENSE("GPL");
4190 MODULE_SOFTDEP("pre: crc32");