// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;
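
/*
 * Fault injection support: the table below gives each injectable fault type
 * a human-readable name, printed when an injected fault fires.
 */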
#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
	[FAULT_SLAB_ALLOC]	= "slab alloc",
	[FAULT_DQUOT_INIT]	= "dquot initialize",
	[FAULT_LOCK_OP]		= "lock_op",
	[FAULT_BLKADDR]		= "invalid blkaddr",
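
/*
 * Configure fault injection for this superblock: inject roughly one failure
 * per @rate calls at the sites selected by the @type bitmask; a zero
 * rate/type clears the whole fault_info structure.
 */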
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	atomic_set(&ffi->inject_ops, 0);
	ffi->inject_rate = rate;

	ffi->inject_type = type;

	memset(ffi, 0, sizeof(struct f2fs_fault_info));

/* f2fs-wide shrinker description */
static struct shrinker *f2fs_shrinker_info;

static int __init f2fs_init_shrinker(void)
	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
	if (!f2fs_shrinker_info)

	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;

	shrinker_register(f2fs_shrinker_info);

static void f2fs_exit_shrinker(void)
	shrinker_free(f2fs_shrinker_info);
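
/*
 * Mount option tokens: each Opt_* value is matched against its pattern in
 * the f2fs_tokens table below, e.g. (with a hypothetical device path):
 *   mount -t f2fs -o background_gc=on,discard,compress_algorithm=lz4 \
 *         /dev/sdb1 /mnt/f2fs
 */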
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_nocompress_extension,
	Opt_age_extent_cache,

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_nocompress_extension, "nocompress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_discard_unit, "discard_unit=%s"},
	{Opt_memory_mode, "memory=%s"},
	{Opt_age_extent_cache, "age_extent_cache"},
	{Opt_errors, "errors=%s"},
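
/*
 * Core logging helper: honours a KERN_* level embedded in @fmt and prefixes
 * every message with the filesystem id, so callers use the f2fs_err()/
 * f2fs_info() style wrappers rather than raw printk().
 */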
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
	struct va_format vaf;

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);

	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
	unsigned int version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},

static const struct f2fs_sb_encodings *
f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
	__u16 magic = le16_to_cpu(sb->s_encoding);

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			return &f2fs_sb_encoding_map[i];

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
	return f2fs_cf_name_slab ? 0 : -ENOMEM;

static void f2fs_destroy_casefold_cache(void)
	kmem_cache_destroy(f2fs_cf_name_slab);
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
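
/*
 * Clamp reserve_root to at most 1/8 of user blocks (and to what remains
 * after the filesystem's own reservation); resuid/resgid are ignored when
 * reserve_root was not requested.
 */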
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
	block_t limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
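
/*
 * For io_bits (IO-aligned) mounts, migrating one section in the worst case
 * needs extra over-provisioning; compute how many additional reserved
 * segments that costs, and fail when that space is not available.
 */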
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
		  wanted_reserved_segments);

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
	if (!F2FS_OPTION(sbi).unusable_cap_perc)

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
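
/* Slab constructor: called once when an inode object's memory is first set up. */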
static void init_once(void *foo)
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);

static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");

	qname = match_strdup(args);
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
		f2fs_err(sbi, "%s quota file already specified",
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;

static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const substring_t *arg,
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct fs_parameter param = {
		.type = fs_value_is_string,
		.string = arg->from ? arg->from : "",
	struct fscrypt_dummy_policy *policy =
		&F2FS_OPTION(sbi).dummy_enc_policy;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		f2fs_warn(sbi, "test_dummy_encryption option not supported");

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");

	err = fscrypt_parse_test_dummy_encryption(&param, policy);
			"Can't change test_dummy_encryption on remount");
	else if (err == -EINVAL)
		f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
		f2fs_warn(sbi, "Error processing option \"%s\" [%d]",

	f2fs_warn(sbi, "Test dummy encryption mode enabled");
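
/*
 * Return true if @new_ext already occurs in the selected extension list:
 * the compress list when @is_ext is true, otherwise the nocompress list.
 */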
#ifdef CONFIG_F2FS_FS_COMPRESSION
static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
					const char *new_ext, bool is_ext)
	unsigned char (*ext)[F2FS_EXTENSION_LEN];

		ext = F2FS_OPTION(sbi).extensions;
		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
		ext = F2FS_OPTION(sbi).noextensions;
		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

	for (i = 0; i < ext_cnt; i++) {
		if (!strcasecmp(new_ext, ext[i]))
/*
 * 1. The same extension name must not appear in both the compress and
 *    non-compress extension lists.
 * 2. If the compress extension specifies all files, the types specified by
 *    the non-compress extension will be treated as special cases and will
 *    not be compressed.
 * 3. Don't allow the non-compress extension to specify all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt, index = 0, no_index = 0;

	ext = F2FS_OPTION(sbi).extensions;
	ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	noext = F2FS_OPTION(sbi).noextensions;
	noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

	for (no_index = 0; no_index < noext_cnt; no_index++) {
		if (!strcasecmp("*", noext[no_index])) {
			f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
		for (index = 0; index < ext_cnt; index++) {
			if (!strcasecmp(ext[index], noext[no_index])) {
				f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
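
/* Parse an optional ":<level>" suffix, e.g. compress_algorithm=lz4hc:9. */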
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
#ifdef CONFIG_F2FS_FS_LZ4HC
	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;

		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");

	if (kstrtouint(str + 1, 10, &level))

	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);

	F2FS_OPTION(sbi).compress_level = level;
#else
	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;

	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
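
/* Parse an optional ":<level>" suffix, e.g. compress_algorithm=zstd:6. */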
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;

		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");

	if (kstrtouint(str + 1, 10, &level))

	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);

	F2FS_OPTION(sbi).compress_level = level;
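
/*
 * Central option parser for mount and remount: tokenize the option string,
 * fill F2FS_OPTION(sbi), then cross-check the combination against the
 * on-disk feature flags.
 */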
static int parse_options(struct super_block *sb, char *options, bool is_remount)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt;

	while ((p = strsep(&options, ",")) != NULL) {
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
			if (!f2fs_hw_support_discard(sbi)) {
				f2fs_warn(sbi, "device does not support discard");
			set_opt(sbi, DISCARD);
			if (f2fs_hw_should_discard(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
			clear_opt(sbi, DISCARD);
			set_opt(sbi, NOHEAP);
			clear_opt(sbi, NOHEAP);
#ifdef CONFIG_F2FS_FS_XATTR
			set_opt(sbi, XATTR_USER);
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
#else
			f2fs_info(sbi, "user_xattr options not supported");
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
			set_opt(sbi, POSIX_ACL);
			clear_opt(sbi, POSIX_ACL);
#else
			f2fs_info(sbi, "acl options not supported");
			f2fs_info(sbi, "noacl options not supported");
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
			if (arg != 2 && arg != 4 &&
					arg != NR_CURSEG_PERSIST_TYPE)
			F2FS_OPTION(sbi).active_logs = arg;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			set_opt(sbi, NOBARRIER);
			clear_opt(sbi, NOBARRIER);
			set_opt(sbi, FASTBOOT);
		case Opt_extent_cache:
			set_opt(sbi, READ_EXTENT_CACHE);
		case Opt_noextent_cache:
			clear_opt(sbi, READ_EXTENT_CACHE);
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			set_opt(sbi, DATA_FLUSH);
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			F2FS_OPTION(sbi).root_reserved_blocks = arg;
			set_opt(sbi, RESERVE_ROOT);
			if (args->from && match_int(args, &arg))
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
			F2FS_OPTION(sbi).s_resuid = uid;
			if (args->from && match_int(args, &arg))
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
			F2FS_OPTION(sbi).s_resgid = gid;
			name = match_strdup(&args[0]);

			if (!strcmp(name, "adaptive")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else if (!strcmp(name, "fragment:segment")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
			} else if (!strcmp(name, "fragment:block")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
				f2fs_warn(sbi, "Not support %ld, larger than %d",
					BIT(arg), BIO_MAX_VECS);
			F2FS_OPTION(sbi).write_io_size_bits = arg;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);

			if (args->from && match_int(args, &arg))
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");

			f2fs_info(sbi, "fault_type options not supported");
			sb->s_flags |= SB_LAZYTIME;
			sb->s_flags &= ~SB_LAZYTIME;
			set_opt(sbi, USRQUOTA);
			set_opt(sbi, GRPQUOTA);
			set_opt(sbi, PRJQUOTA);
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
#else
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			f2fs_info(sbi, "quota operations not supported");
			name = match_strdup(&args[0]);

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			name = match_strdup(&args[0]);
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
			if (arg < 0 || arg > 100)
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			name = match_strdup(&args[0]);
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				F2FS_OPTION(sbi).compress_algorithm =
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				F2FS_OPTION(sbi).compress_algorithm =
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			if (args->from && match_int(args, &arg))
			if (arg < MIN_COMPRESS_LOG_SIZE ||
					arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
			F2FS_OPTION(sbi).compress_log_size = arg;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			name = match_strdup(&args[0]);

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
					ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");

			if (is_compress_extension_exist(sbi, name, true)) {

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
		case Opt_nocompress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			name = match_strdup(&args[0]);

			noext = F2FS_OPTION(sbi).noextensions;
			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
					noext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");

			if (is_compress_extension_exist(sbi, name, false)) {

			strcpy(noext[noext_cnt], name);
			F2FS_OPTION(sbi).nocompress_ext_cnt++;
		case Opt_compress_chksum:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			F2FS_OPTION(sbi).compress_chksum = true;
		case Opt_compress_mode:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			name = match_strdup(&args[0]);
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
		case Opt_compress_cache:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
			set_opt(sbi, COMPRESS_CACHE);
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_nocompress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			set_opt(sbi, GC_MERGE);
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
		case Opt_discard_unit:
			name = match_strdup(&args[0]);
			if (!strcmp(name, "block")) {
				F2FS_OPTION(sbi).discard_unit =
			} else if (!strcmp(name, "segment")) {
				F2FS_OPTION(sbi).discard_unit =
							DISCARD_UNIT_SEGMENT;
			} else if (!strcmp(name, "section")) {
				F2FS_OPTION(sbi).discard_unit =
							DISCARD_UNIT_SECTION;
		case Opt_memory_mode:
			name = match_strdup(&args[0]);
			if (!strcmp(name, "normal")) {
				F2FS_OPTION(sbi).memory_mode =
			} else if (!strcmp(name, "low")) {
				F2FS_OPTION(sbi).memory_mode =
		case Opt_age_extent_cache:
			set_opt(sbi, AGE_EXTENT_CACHE);
			name = match_strdup(&args[0]);
			if (!strcmp(name, "remount-ro")) {
				F2FS_OPTION(sbi).errors =
						MOUNT_ERRORS_READONLY;
			} else if (!strcmp(name, "continue")) {
				F2FS_OPTION(sbi).errors =
						MOUNT_ERRORS_CONTINUE;
			} else if (!strcmp(name, "panic")) {
				F2FS_OPTION(sbi).errors =
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",

	if (f2fs_check_quota_options(sbi))
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");

#if !IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
#ifdef CONFIG_BLK_DEV_ZONED
		if (F2FS_OPTION(sbi).discard_unit !=
				DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_OPTION(sbi).discard_unit =
					DISCARD_UNIT_SECTION;

		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_test_compress_extension(sbi)) {
		f2fs_err(sbi, "invalid compress or nocompress extension");

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
			F2FS_IO_SIZE_KB(sbi));

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
				!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");

		min_size = MIN_INLINE_XATTR_SIZE;
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				min_size, max_size);

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS is not compatible with checkpoint=disable");

	if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS is not compatible with ATGC");

	if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
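
/* Allocate an in-core f2fs inode and initialize the f2fs-private fields. */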
static struct inode *f2fs_alloc_inode(struct super_block *sb)
	struct f2fs_inode_info *fi;

	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))

	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;

static int f2fs_drop_inode(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
				inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		trace_f2fs_drop_inode(inode, 0);

	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
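
/*
 * Mark an inode dirty for checkpointing; with @sync it is also queued on
 * the global dirty-meta list so checkpoint can write it back. Returns
 * nonzero if the inode was already dirty.
 */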
int f2fs_inode_dirtied(struct inode *inode, bool sync)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

void f2fs_inode_synced(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);

static void f2fs_free_inode(struct inode *inode)
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);

static void destroy_device_list(struct f2fs_sb_info *sbi)
	for (i = 0; i < sbi->s_ndevs; i++) {
		bdev_release(FDEV(i).bdev_handle);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
static void f2fs_put_super(struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	/* unregister procfs/sysfs entries in advance to avoid races */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * Flush all issued checkpoints and stop the checkpoint issue thread.
	 * After that, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);

	/* be sure to wait for any on-going discard commands */
	done = f2fs_issue_discard_timeout(sbi);
	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);

	/*
	 * Normally the superblock is clean, so we need to release the ino
	 * entries here. In addition, an EIO will have skipped the checkpoint,
	 * so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	if (err || f2fs_cp_error(sbi)) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));

	for (i = 0; i < NR_COUNT_TYPE; i++) {
		if (!get_pages(sbi, i))
		f2fs_err(sbi, "detect filesystem reference count leak during "
			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
		f2fs_bug_on(sbi, 1);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);

	f2fs_destroy_post_read_wq(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);

	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);

	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);

int f2fs_sync_fs(struct super_block *sb, int sync)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (unlikely(f2fs_cp_error(sbi)))
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_issue_checkpoint(sbi);

static int f2fs_freeze(struct super_block *sb)
	if (f2fs_readonly(sb))

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))

	/* Let's flush checkpoints and stop the thread. */
	f2fs_flush_ckpt_thread(F2FS_SB(sb));

	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
	set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);

static int f2fs_unfreeze(struct super_block *sb)
	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
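
/*
 * Clamp the statfs numbers to the project-quota limits of @projid,
 * similar to ext4's project-quota statfs handling.
 */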
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
	struct dquot *dquot;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			(buf->f_blocks - curblock) : 0;

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			(buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;

	spin_unlock(&dquot->dq_dqb_lock);

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;
	unsigned int total_valid_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;

	spin_lock(&sbi->stat_lock);

	user_block_count = sbi->user_block_count;
	total_valid_node_count = valid_node_count(sbi);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - total_valid_node_count,

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		seq_printf(seq, ",jqfmt=%s", fmtname);

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);

#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(struct seq_file *seq,
						struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!f2fs_sb_has_compression(sbi))

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	if (F2FS_OPTION(sbi).compress_level)
		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);

	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
		seq_printf(seq, ",nocompress_extension=%s",
			F2FS_OPTION(sbi).noextensions[i]);

	if (F2FS_OPTION(sbi).compress_chksum)
		seq_puts(seq, ",compress_chksum");

	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
		seq_printf(seq, ",compress_mode=%s", "fs");
	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
		seq_printf(seq, ",compress_mode=%s", "user");

	if (test_opt(sbi, COMPRESS_CACHE))
		seq_puts(seq, ",compress_cache");

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");
	else
		seq_puts(seq, ",nogc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD)) {
		seq_puts(seq, ",discard");
		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
			seq_printf(seq, ",discard_unit=%s", "block");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
			seq_printf(seq, ",discard_unit=%s", "segment");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
			seq_printf(seq, ",discard_unit=%s", "section");
	} else {
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
				F2FS_OPTION(sbi).inline_xattr_size);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	else
		seq_puts(seq, ",noflush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	else
		seq_puts(seq, ",barrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, READ_EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, AGE_EXTENT_CACHE))
		seq_puts(seq, ",age_extent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
		seq_puts(seq, "fragment:segment");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
		seq_puts(seq, "fragment:block");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);

	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");

	f2fs_show_quota_options(seq, sbi->sb);

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
		seq_printf(seq, ",memory=%s", "normal");
	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
		seq_printf(seq, ",memory=%s", "low");

	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
		seq_printf(seq, ",errors=%s", "remount-ro");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
		seq_printf(seq, ",errors=%s", "continue");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
		seq_printf(seq, ",errors=%s", "panic");
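
/* Reset mount options to their defaults before (re)parsing the option string. */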
static void default_options(struct f2fs_sb_info *sbi, bool remount)
	/* init some FS parameters */
	set_opt(sbi, READ_EXTENT_CACHE);
	clear_opt(sbi, DISABLE_CHECKPOINT);

	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
		set_opt(sbi, DISCARD);

	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
	else
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;

	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
			SMALL_VOLUME_SEGMENTS)
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
	else
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	if (f2fs_sb_has_compression(sbi)) {
		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
		F2FS_OPTION(sbi).compress_ext_cnt = 0;
		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;

	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, NOHEAP);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	if (!f2fs_is_readonly(sbi))
		set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);

	f2fs_build_fault_attr(sbi, 0, 0);

static int f2fs_enable_quotas(struct super_block *sb);

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	unsigned int gc_mode = sbi->gc_mode;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
	sbi->sb->s_flags |= SB_ACTIVE;

	/* check if we need more GC first */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (!f2fs_disable_cp_again(sbi, unusable))

	f2fs_update_time(sbi, DISABLE_TIME);

	sbi->gc_mode = GC_URGENT_HIGH;

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		err = f2fs_gc(sbi, &gc_control);
		if (err == -ENODATA) {
		if (err && err != -EAGAIN)

	ret = sync_filesystem(sbi->sb);
		err = ret ? ret : err;

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {

	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

	f2fs_up_write(&sbi->gc_lock);

	sbi->gc_mode = gc_mode;
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);

	/* Let's ensure there's no pending checkpoint anymore */
	f2fs_flush_ckpt_thread(sbi);
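
/*
 * Remount handler: re-parse options, then start or stop the GC, flush,
 * discard and checkpoint threads to match the new state, rolling back via
 * the restore_* labels on failure.
 */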
2330 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
2332 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2333 struct f2fs_mount_info org_mount_opt;
2334 unsigned long old_sb_flags;
2336 bool need_restart_gc = false, need_stop_gc = false;
2337 bool need_restart_flush = false, need_stop_flush = false;
2338 bool need_restart_discard = false, need_stop_discard = false;
2339 bool need_enable_checkpoint = false, need_disable_checkpoint = false;
2340 bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
2341 bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
2342 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2343 bool no_io_align = !F2FS_IO_ALIGNED(sbi);
2344 bool no_atgc = !test_opt(sbi, ATGC);
2345 bool no_discard = !test_opt(sbi, DISCARD);
2346 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2347 bool block_unit_discard = f2fs_block_unit_discard(sbi);
2353 * Save the old mount options in case we
2354 * need to restore them.
2356 org_mount_opt = sbi->mount_opt;
2357 old_sb_flags = sb->s_flags;
2360 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2361 for (i = 0; i < MAXQUOTAS; i++) {
2362 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2363 org_mount_opt.s_qf_names[i] =
2364 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2366 if (!org_mount_opt.s_qf_names[i]) {
2367 for (j = 0; j < i; j++)
2368 kfree(org_mount_opt.s_qf_names[j]);
2372 org_mount_opt.s_qf_names[i] = NULL;
2377 /* recover superblocks we couldn't write due to previous RO mount */
2378 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2379 err = f2fs_commit_super(sbi, false);
2380 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2383 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2386 default_options(sbi, true);
2388 /* parse mount options */
2389 err = parse_options(sb, data, true);
2393 /* flush outstanding errors before changing fs state */
2394 flush_work(&sbi->s_error_work);
2397 * Previous and new state of filesystem is RO,
2398 * so skip checking GC and FLUSH_MERGE conditions.
2400 if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
2403 if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
2409 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
2410 err = dquot_suspend(sb, -1);
2413 } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
2414 /* dquot_resume needs RW */
2415 sb->s_flags &= ~SB_RDONLY;
2416 if (sb_any_quota_suspended(sb)) {
2417 dquot_resume(sb, -1);
2418 } else if (f2fs_sb_has_quota_ino(sbi)) {
2419 err = f2fs_enable_quotas(sb);
2425 if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
2427 f2fs_warn(sbi, "LFS is not compatible with IPU");
2431 /* disallow enabling atgc dynamically */
2432 if (no_atgc == !!test_opt(sbi, ATGC)) {
2434 f2fs_warn(sbi, "switch atgc option is not allowed");
2438 /* disallow enabling/disabling extent_cache dynamically */
2439 if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
2441 f2fs_warn(sbi, "switch extent_cache option is not allowed");
2444 /* disallow enabling/disabling age_extent_cache dynamically */
2445 if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
2447 f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
2451 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
2453 f2fs_warn(sbi, "switch io_bits option is not allowed");
2457 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2459 f2fs_warn(sbi, "switch compress_cache option is not allowed");
2463 if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2465 f2fs_warn(sbi, "switch discard_unit option is not allowed");
2469 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2471 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2476 * We stop the GC thread if the FS is mounted as RO or if
2477 * background_gc=off is passed as a mount option. Also sync the
2478 * filesystem.
2480 if ((*flags & SB_RDONLY) ||
2481 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2482 !test_opt(sbi, GC_MERGE))) {
2483 if (sbi->gc_thread) {
2484 f2fs_stop_gc_thread(sbi);
2485 need_restart_gc = true;
2487 } else if (!sbi->gc_thread) {
2488 err = f2fs_start_gc_thread(sbi);
2491 need_stop_gc = true;
2494 if (*flags & SB_RDONLY) {
2497 set_sbi_flag(sbi, SBI_IS_DIRTY);
2498 set_sbi_flag(sbi, SBI_IS_CLOSE);
2499 f2fs_sync_fs(sb, 1);
2500 clear_sbi_flag(sbi, SBI_IS_CLOSE);
2504 * We stop the issue-flush thread if the FS is mounted as RO or if
2505 * flush_merge is not passed as a mount option.
2507 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2508 clear_opt(sbi, FLUSH_MERGE);
2509 f2fs_destroy_flush_cmd_control(sbi, false);
2510 need_restart_flush = true;
2512 err = f2fs_create_flush_cmd_control(sbi);
2515 need_stop_flush = true;
2518 if (no_discard == !!test_opt(sbi, DISCARD)) {
2519 if (test_opt(sbi, DISCARD)) {
2520 err = f2fs_start_discard_thread(sbi);
2523 need_stop_discard = true;
2525 f2fs_stop_discard_thread(sbi);
2526 f2fs_issue_discard_timeout(sbi);
2527 need_restart_discard = true;
2531 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2532 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2533 err = f2fs_disable_checkpoint(sbi);
2535 goto restore_discard;
2536 need_enable_checkpoint = true;
2538 f2fs_enable_checkpoint(sbi);
2539 need_disable_checkpoint = true;
2544 * Place this routine at the end, since a new checkpoint could be
2545 * triggered while remounting, and we need to take care of it before
2546 * returning from remount.
2548 if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2549 !test_opt(sbi, MERGE_CHECKPOINT)) {
2550 f2fs_stop_ckpt_thread(sbi);
2552 /* Flush the previous checkpoint if it exists. */
2553 f2fs_flush_ckpt_thread(sbi);
2555 err = f2fs_start_ckpt_thread(sbi);
2558 "Failed to start F2FS issue_checkpoint_thread (%d)",
2560 goto restore_checkpoint;
2566 /* Release old quota file names */
2567 for (i = 0; i < MAXQUOTAS; i++)
2568 kfree(org_mount_opt.s_qf_names[i]);
2570 /* Update the POSIXACL Flag */
2571 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2572 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2574 limit_reserve_root(sbi);
2575 adjust_unusable_cap_perc(sbi);
2576 *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2579 if (need_enable_checkpoint) {
2580 f2fs_enable_checkpoint(sbi);
2581 } else if (need_disable_checkpoint) {
2582 if (f2fs_disable_checkpoint(sbi))
2583 f2fs_warn(sbi, "checkpoint has not been disabled");
2586 if (need_restart_discard) {
2587 if (f2fs_start_discard_thread(sbi))
2588 f2fs_warn(sbi, "discard has been stopped");
2589 } else if (need_stop_discard) {
2590 f2fs_stop_discard_thread(sbi);
2593 if (need_restart_flush) {
2594 if (f2fs_create_flush_cmd_control(sbi))
2595 f2fs_warn(sbi, "background flush thread has stopped");
2596 } else if (need_stop_flush) {
2597 clear_opt(sbi, FLUSH_MERGE);
2598 f2fs_destroy_flush_cmd_control(sbi, false);
2601 if (need_restart_gc) {
2602 if (f2fs_start_gc_thread(sbi))
2603 f2fs_warn(sbi, "background gc thread has stopped");
2604 } else if (need_stop_gc) {
2605 f2fs_stop_gc_thread(sbi);
2609 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2610 for (i = 0; i < MAXQUOTAS; i++) {
2611 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2612 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2615 sbi->mount_opt = org_mount_opt;
2616 sb->s_flags = old_sb_flags;
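/*
 * Orphan inodes always require recovery; roll-forward recovery of fsynced
 * data is needed when the last checkpoint was not an umount checkpoint,
 * unless the user opted out via disable_roll_forward or norecovery.
 */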
2621 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
2623 /* need to recover orphan inodes */
2624 if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
2626 /* need to recover data */
2627 if (test_opt(sbi, DISABLE_ROLL_FORWARD))
2629 if (test_opt(sbi, NORECOVERY))
2631 return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
2634 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
2636 bool readonly = f2fs_readonly(sbi->sb);
2638 if (!f2fs_need_recovery(sbi))
2641 /* no need to check f2fs_sb_has_readonly() */
2642 if (f2fs_hw_is_readonly(sbi))
2646 sbi->sb->s_flags &= ~SB_RDONLY;
2647 set_sbi_flag(sbi, SBI_IS_WRITABLE);
2651 * Turn on quotas that were not enabled for read-only mounts if the
2652 * filesystem has the quota feature, so that they are updated correctly.
2654 return f2fs_enable_quota_files(sbi, readonly);
2657 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
2661 f2fs_quota_off_umount(sbi->sb);
2663 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
2664 clear_sbi_flag(sbi, SBI_IS_WRITABLE);
2665 sbi->sb->s_flags |= SB_RDONLY;
2669 /* Read data from quotafile */
2670 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2671 size_t len, loff_t off)
2673 struct inode *inode = sb_dqopt(sb)->files[type];
2674 struct address_space *mapping = inode->i_mapping;
2675 block_t blkidx = F2FS_BYTES_TO_BLK(off);
2676 int offset = off & (sb->s_blocksize - 1);
2679 loff_t i_size = i_size_read(inode);
2685 if (off + len > i_size)
2688 while (toread > 0) {
2689 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2691 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2693 if (PTR_ERR(page) == -ENOMEM) {
2694 memalloc_retry_wait(GFP_NOFS);
2697 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2698 return PTR_ERR(page);
2703 if (unlikely(page->mapping != mapping)) {
2704 f2fs_put_page(page, 1);
2707 if (unlikely(!PageUptodate(page))) {
2708 f2fs_put_page(page, 1);
2709 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2713 memcpy_from_page(data, page, offset, tocopy);
2714 f2fs_put_page(page, 1);
2724 /* Write to quotafile */
2725 static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2726 const char *data, size_t len, loff_t off)
2728 struct inode *inode = sb_dqopt(sb)->files[type];
2729 struct address_space *mapping = inode->i_mapping;
2730 const struct address_space_operations *a_ops = mapping->a_ops;
2731 int offset = off & (sb->s_blocksize - 1);
2732 size_t towrite = len;
2734 void *fsdata = NULL;
2738 while (towrite > 0) {
2739 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2742 err = a_ops->write_begin(NULL, mapping, off, tocopy,
2744 if (unlikely(err)) {
2745 if (err == -ENOMEM) {
2746 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2749 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2753 memcpy_to_page(page, offset, data, tocopy);
2755 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
2766 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2767 f2fs_mark_inode_dirty_sync(inode, false);
2768 return len - towrite;
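/*
 * Note: as with other quota_write implementations, the return value is
 * the number of bytes actually written; the quota core compares it
 * against the requested length to detect short writes.
 */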
2771 int f2fs_dquot_initialize(struct inode *inode)
2773 if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
2776 return dquot_initialize(inode);
2779 static struct dquot **f2fs_get_dquots(struct inode *inode)
2781 return F2FS_I(inode)->i_dquot;
2784 static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2786 return &F2FS_I(inode)->i_reserved_quota;
2789 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2791 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2792 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2796 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2797 F2FS_OPTION(sbi).s_jquota_fmt, type);
2800 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2805 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2806 err = f2fs_enable_quotas(sbi->sb);
2808 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2814 for (i = 0; i < MAXQUOTAS; i++) {
2815 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2816 err = f2fs_quota_on_mount(sbi, i);
2821 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2828 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2831 struct inode *qf_inode;
2832 unsigned long qf_inum;
2833 unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
2836 BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2838 qf_inum = f2fs_qf_ino(sb, type);
2842 qf_inode = f2fs_iget(sb, qf_inum);
2843 if (IS_ERR(qf_inode)) {
2844 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2845 return PTR_ERR(qf_inode);
2848 /* Don't account quota for quota files to avoid recursion */
2849 inode_lock(qf_inode);
2850 qf_inode->i_flags |= S_NOQUOTA;
2852 if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
2853 F2FS_I(qf_inode)->i_flags |= qf_flag;
2854 f2fs_set_inode_flags(qf_inode);
2856 inode_unlock(qf_inode);
2858 err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2863 static int f2fs_enable_quotas(struct super_block *sb)
2865 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2867 unsigned long qf_inum;
2868 bool quota_mopt[MAXQUOTAS] = {
2869 test_opt(sbi, USRQUOTA),
2870 test_opt(sbi, GRPQUOTA),
2871 test_opt(sbi, PRJQUOTA),
2874 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2875 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2879 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2881 for (type = 0; type < MAXQUOTAS; type++) {
2882 qf_inum = f2fs_qf_ino(sb, type);
2884 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2885 DQUOT_USAGE_ENABLED |
2886 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2888 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2890 for (type--; type >= 0; type--)
2891 dquot_quota_off(sb, type);
2892 set_sbi_flag(F2FS_SB(sb),
2893 SBI_QUOTA_NEED_REPAIR);
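/*
 * Flush a single quota file: write back its dquots and its dirty pages;
 * for non-journalled quota also wait for the writeback and truncate the
 * page cache so userspace rereads the on-disk state. Any failure marks
 * the sb with SBI_QUOTA_NEED_REPAIR.
 */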
2901 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2903 struct quota_info *dqopt = sb_dqopt(sbi->sb);
2904 struct address_space *mapping = dqopt->files[type]->i_mapping;
2907 ret = dquot_writeback_dquots(sbi->sb, type);
2911 ret = filemap_fdatawrite(mapping);
2915 /* if we are using journalled quota */
2916 if (is_journalled_quota(sbi))
2919 ret = filemap_fdatawait(mapping);
2921 truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2924 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2928 int f2fs_quota_sync(struct super_block *sb, int type)
2930 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2931 struct quota_info *dqopt = sb_dqopt(sb);
2936 * Now that everything is written, we can discard the pagecache so
2937 * that userspace sees the changes.
2939 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2941 if (type != -1 && cnt != type)
2944 if (!sb_has_quota_active(sb, cnt))
2947 if (!f2fs_sb_has_quota_ino(sbi))
2948 inode_lock(dqopt->files[cnt]);
2953 * f2fs_down_read(quota_sem)
2954 * dquot_writeback_dquots()
2955 * f2fs_dquot_commit
2956 * block_operation
2957 * f2fs_down_read(quota_sem)
2960 f2fs_down_read(&sbi->quota_sem);
2962 ret = f2fs_quota_sync_file(sbi, cnt);
2964 f2fs_up_read(&sbi->quota_sem);
2965 f2fs_unlock_op(sbi);
2967 if (!f2fs_sb_has_quota_ino(sbi))
2968 inode_unlock(dqopt->files[cnt]);
2976 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2977 const struct path *path)
2979 struct inode *inode;
2982 /* if a quota sysfile exists, deny enabling quota with a specific file */
2983 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2984 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2988 if (path->dentry->d_sb != sb)
2991 err = f2fs_quota_sync(sb, type);
2995 inode = d_inode(path->dentry);
2997 err = filemap_fdatawrite(inode->i_mapping);
3001 err = filemap_fdatawait(inode->i_mapping);
3005 err = dquot_quota_on(sb, type, format_id, path);
3010 F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
3011 f2fs_set_inode_flags(inode);
3012 inode_unlock(inode);
3013 f2fs_mark_inode_dirty_sync(inode, false);
3018 static int __f2fs_quota_off(struct super_block *sb, int type)
3020 struct inode *inode = sb_dqopt(sb)->files[type];
3023 if (!inode || !igrab(inode))
3024 return dquot_quota_off(sb, type);
3026 err = f2fs_quota_sync(sb, type);
3030 err = dquot_quota_off(sb, type);
3031 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
3035 F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
3036 f2fs_set_inode_flags(inode);
3037 inode_unlock(inode);
3038 f2fs_mark_inode_dirty_sync(inode, false);
3044 static int f2fs_quota_off(struct super_block *sb, int type)
3046 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3049 err = __f2fs_quota_off(sb, type);
3052 * quotactl can shut down journalled quota, resulting in inconsistency
3053 * between the quota records and fs data after subsequent updates; set
3054 * the flag to let fsck be aware of it.
3056 if (is_journalled_quota(sbi))
3057 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3061 void f2fs_quota_off_umount(struct super_block *sb)
3066 for (type = 0; type < MAXQUOTAS; type++) {
3067 err = __f2fs_quota_off(sb, type);
3069 int ret = dquot_quota_off(sb, type);
3071 f2fs_err(F2FS_SB(sb), "Failed to turn off disk quota (type: %d, err: %d, ret: %d). Please run fsck to fix it.",
3073 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
3077 * In case of checkpoint=disable, we must flush the quota blocks here;
3078 * a later flush could hit a NULL node_inode in end_io, since
3079 * put_super has already dropped it.
3081 sync_filesystem(sb);
3084 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
3086 struct quota_info *dqopt = sb_dqopt(sb);
3089 for (type = 0; type < MAXQUOTAS; type++) {
3090 if (!dqopt->files[type])
3092 f2fs_inode_synced(dqopt->files[type]);
3096 static int f2fs_dquot_commit(struct dquot *dquot)
3098 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3101 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
3102 ret = dquot_commit(dquot);
3104 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3105 f2fs_up_read(&sbi->quota_sem);
3109 static int f2fs_dquot_acquire(struct dquot *dquot)
3111 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3114 f2fs_down_read(&sbi->quota_sem);
3115 ret = dquot_acquire(dquot);
3117 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3118 f2fs_up_read(&sbi->quota_sem);
3122 static int f2fs_dquot_release(struct dquot *dquot)
3124 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3125 int ret = dquot_release(dquot);
3128 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3132 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
3134 struct super_block *sb = dquot->dq_sb;
3135 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3136 int ret = dquot_mark_dquot_dirty(dquot);
3138 /* if we are using journalled quota */
3139 if (is_journalled_quota(sbi))
3140 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
3145 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
3147 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3148 int ret = dquot_commit_info(sb, type);
3151 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3155 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
3157 *projid = F2FS_I(inode)->i_projid;
3161 static const struct dquot_operations f2fs_quota_operations = {
3162 .get_reserved_space = f2fs_get_reserved_space,
3163 .write_dquot = f2fs_dquot_commit,
3164 .acquire_dquot = f2fs_dquot_acquire,
3165 .release_dquot = f2fs_dquot_release,
3166 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
3167 .write_info = f2fs_dquot_commit_info,
3168 .alloc_dquot = dquot_alloc,
3169 .destroy_dquot = dquot_destroy,
3170 .get_projid = f2fs_get_projid,
3171 .get_next_id = dquot_get_next_id,
3174 static const struct quotactl_ops f2fs_quotactl_ops = {
3175 .quota_on = f2fs_quota_on,
3176 .quota_off = f2fs_quota_off,
3177 .quota_sync = f2fs_quota_sync,
3178 .get_state = dquot_get_state,
3179 .set_info = dquot_set_dqinfo,
3180 .get_dqblk = dquot_get_dqblk,
3181 .set_dqblk = dquot_set_dqblk,
3182 .get_nextdqblk = dquot_get_next_dqblk,
3185 int f2fs_dquot_initialize(struct inode *inode)
3190 int f2fs_quota_sync(struct super_block *sb, int type)
3195 void f2fs_quota_off_umount(struct super_block *sb)
3200 static const struct super_operations f2fs_sops = {
3201 .alloc_inode = f2fs_alloc_inode,
3202 .free_inode = f2fs_free_inode,
3203 .drop_inode = f2fs_drop_inode,
3204 .write_inode = f2fs_write_inode,
3205 .dirty_inode = f2fs_dirty_inode,
3206 .show_options = f2fs_show_options,
3208 .quota_read = f2fs_quota_read,
3209 .quota_write = f2fs_quota_write,
3210 .get_dquots = f2fs_get_dquots,
3212 .evict_inode = f2fs_evict_inode,
3213 .put_super = f2fs_put_super,
3214 .sync_fs = f2fs_sync_fs,
3215 .freeze_fs = f2fs_freeze,
3216 .unfreeze_fs = f2fs_unfreeze,
3217 .statfs = f2fs_statfs,
3218 .remount_fs = f2fs_remount,
3221 #ifdef CONFIG_FS_ENCRYPTION
3222 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
3224 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3225 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3229 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3232 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3235 * Encrypting the root directory is not allowed because fsck
3236 * expects lost+found directory to exist and remain unencrypted
3237 * if LOST_FOUND feature is enabled.
3240 if (f2fs_sb_has_lost_found(sbi) &&
3241 inode->i_ino == F2FS_ROOT_INO(sbi))
3244 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3245 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3246 ctx, len, fs_data, XATTR_CREATE);
3249 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
3251 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
3254 static bool f2fs_has_stable_inodes(struct super_block *sb)
3259 static struct block_device **f2fs_get_devices(struct super_block *sb,
3260 unsigned int *num_devs)
3262 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3263 struct block_device **devs;
3266 if (!f2fs_is_multi_device(sbi))
3269 devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
3271 return ERR_PTR(-ENOMEM);
3273 for (i = 0; i < sbi->s_ndevs; i++)
3274 devs[i] = FDEV(i).bdev;
3275 *num_devs = sbi->s_ndevs;
3279 static const struct fscrypt_operations f2fs_cryptops = {
3280 .needs_bounce_pages = 1,
3281 .has_32bit_inodes = 1,
3282 .supports_subblock_data_units = 1,
3283 .legacy_key_prefix = "f2fs:",
3284 .get_context = f2fs_get_context,
3285 .set_context = f2fs_set_context,
3286 .get_dummy_policy = f2fs_get_dummy_policy,
3287 .empty_dir = f2fs_empty_dir,
3288 .has_stable_inodes = f2fs_has_stable_inodes,
3289 .get_devices = f2fs_get_devices,
3293 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
3294 u64 ino, u32 generation)
3296 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3297 struct inode *inode;
3299 if (f2fs_check_nid_range(sbi, ino))
3300 return ERR_PTR(-ESTALE);
3303 * f2fs_iget isn't quite right if the inode is currently unallocated!
3304 * However f2fs_iget currently does appropriate checks to handle stale
3305 * inodes so everything is OK.
3307 inode = f2fs_iget(sb, ino);
3309 return ERR_CAST(inode);
3310 if (unlikely(generation && inode->i_generation != generation)) {
3311 /* we didn't find the right inode.. */
3313 return ERR_PTR(-ESTALE);
3318 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3319 int fh_len, int fh_type)
3321 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3322 f2fs_nfs_get_inode);
3325 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3326 int fh_len, int fh_type)
3328 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3329 f2fs_nfs_get_inode);
3332 static const struct export_operations f2fs_export_ops = {
3333 .encode_fh = generic_encode_ino32_fh,
3334 .fh_to_dentry = f2fs_fh_to_dentry,
3335 .fh_to_parent = f2fs_fh_to_parent,
3336 .get_parent = f2fs_get_parent,
3339 loff_t max_file_blocks(struct inode *inode)
3345 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
3346 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
3347 * space in inode.i_addr, so it is safer to reassign
3348 * result as zero.
3351 if (inode && f2fs_compressed_file(inode))
3352 leaf_count = ADDRS_PER_BLOCK(inode);
3354 leaf_count = DEF_ADDRS_PER_BLOCK;
3356 /* two direct node blocks */
3357 result += (leaf_count * 2);
3359 /* two indirect node blocks */
3360 leaf_count *= NIDS_PER_BLOCK;
3361 result += (leaf_count * 2);
3363 /* one double indirect node block */
3364 leaf_count *= NIDS_PER_BLOCK;
3365 result += leaf_count;
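/*
 * e.g. with the common 4KB geometry, where DEF_ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018, the sum above is
 * 2*1018 + 2*1018^2 + 1018^3, about 1.06 billion blocks, capping a
 * regular file at roughly 3.94TB.
 */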
3370 static int __f2fs_commit_super(struct buffer_head *bh,
3371 struct f2fs_super_block *super)
3375 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
3376 set_buffer_dirty(bh);
3379 /* it's a rare case, so we can do FUA all the time */
3380 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
3383 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3384 struct buffer_head *bh)
3386 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3387 (bh->b_data + F2FS_SUPER_OFFSET);
3388 struct super_block *sb = sbi->sb;
3389 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3390 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3391 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3392 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3393 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3394 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3395 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3396 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3397 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3398 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3399 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3400 u32 segment_count = le32_to_cpu(raw_super->segment_count);
3401 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3402 u64 main_end_blkaddr = main_blkaddr +
3403 (segment_count_main << log_blocks_per_seg);
3404 u64 seg_end_blkaddr = segment0_blkaddr +
3405 (segment_count << log_blocks_per_seg);
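/*
 * The checks below verify that the metadata areas sit back to back in
 * the fixed on-disk order, with no gaps or overlaps:
 *
 *   superblock | CP | SIT | NAT | SSA | MAIN
 *
 * and that the main area does not run past the last segment.
 */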
3407 if (segment0_blkaddr != cp_blkaddr) {
3408 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3409 segment0_blkaddr, cp_blkaddr);
3413 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3415 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3416 cp_blkaddr, sit_blkaddr,
3417 segment_count_ckpt << log_blocks_per_seg);
3421 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3423 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3424 sit_blkaddr, nat_blkaddr,
3425 segment_count_sit << log_blocks_per_seg);
3429 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3431 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3432 nat_blkaddr, ssa_blkaddr,
3433 segment_count_nat << log_blocks_per_seg);
3437 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3439 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3440 ssa_blkaddr, main_blkaddr,
3441 segment_count_ssa << log_blocks_per_seg);
3445 if (main_end_blkaddr > seg_end_blkaddr) {
3446 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3447 main_blkaddr, seg_end_blkaddr,
3448 segment_count_main << log_blocks_per_seg);
3450 } else if (main_end_blkaddr < seg_end_blkaddr) {
3454 /* fix in-memory information all the time */
3455 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3456 segment0_blkaddr) >> log_blocks_per_seg);
3458 if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
3459 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3462 err = __f2fs_commit_super(bh, NULL);
3463 res = err ? "failed" : "done";
3465 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3466 res, main_blkaddr, seg_end_blkaddr,
3467 segment_count_main << log_blocks_per_seg);
3474 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3475 struct buffer_head *bh)
3477 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3478 block_t total_sections, blocks_per_seg;
3479 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3480 (bh->b_data + F2FS_SUPER_OFFSET);
3481 size_t crc_offset = 0;
3484 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3485 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3486 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3490 /* Check checksum_offset and crc in superblock */
3491 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3492 crc_offset = le32_to_cpu(raw_super->checksum_offset);
3494 offsetof(struct f2fs_super_block, crc)) {
3495 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3497 return -EFSCORRUPTED;
3499 crc = le32_to_cpu(raw_super->crc);
3500 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3501 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3502 return -EFSCORRUPTED;
3506 /* Currently, support only 4KB block size */
3507 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3508 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3509 le32_to_cpu(raw_super->log_blocksize),
3511 return -EFSCORRUPTED;
3514 /* check log blocks per segment */
3515 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3516 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3517 le32_to_cpu(raw_super->log_blocks_per_seg));
3518 return -EFSCORRUPTED;
3521 /* Currently, support 512/1024/2048/4096/16K bytes sector size */
3522 if (le32_to_cpu(raw_super->log_sectorsize) >
3523 F2FS_MAX_LOG_SECTOR_SIZE ||
3524 le32_to_cpu(raw_super->log_sectorsize) <
3525 F2FS_MIN_LOG_SECTOR_SIZE) {
3526 f2fs_info(sbi, "Invalid log sectorsize (%u)",
3527 le32_to_cpu(raw_super->log_sectorsize));
3528 return -EFSCORRUPTED;
3530 if (le32_to_cpu(raw_super->log_sectors_per_block) +
3531 le32_to_cpu(raw_super->log_sectorsize) !=
3532 F2FS_MAX_LOG_SECTOR_SIZE) {
3533 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3534 le32_to_cpu(raw_super->log_sectors_per_block),
3535 le32_to_cpu(raw_super->log_sectorsize));
3536 return -EFSCORRUPTED;
3539 segment_count = le32_to_cpu(raw_super->segment_count);
3540 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3541 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3542 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3543 total_sections = le32_to_cpu(raw_super->section_count);
3545 /* blocks_per_seg should be 512, given the above check */
3546 blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
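/* i.e. each segment is 512 blocks * 4KB = 2MB */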
3548 if (segment_count > F2FS_MAX_SEGMENT ||
3549 segment_count < F2FS_MIN_SEGMENTS) {
3550 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3551 return -EFSCORRUPTED;
3554 if (total_sections > segment_count_main || total_sections < 1 ||
3555 segs_per_sec > segment_count || !segs_per_sec) {
3556 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3557 segment_count, total_sections, segs_per_sec);
3558 return -EFSCORRUPTED;
3561 if (segment_count_main != total_sections * segs_per_sec) {
3562 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3563 segment_count_main, total_sections, segs_per_sec);
3564 return -EFSCORRUPTED;
3567 if ((segment_count / segs_per_sec) < total_sections) {
3568 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3569 segment_count, segs_per_sec, total_sections);
3570 return -EFSCORRUPTED;
3573 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3574 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3575 segment_count, le64_to_cpu(raw_super->block_count));
3576 return -EFSCORRUPTED;
3579 if (RDEV(0).path[0]) {
3580 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3583 while (i < MAX_DEVICES && RDEV(i).path[0]) {
3584 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3587 if (segment_count != dev_seg_count) {
3588 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3589 segment_count, dev_seg_count);
3590 return -EFSCORRUPTED;
3593 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3594 !bdev_is_zoned(sbi->sb->s_bdev)) {
3595 f2fs_info(sbi, "Zoned block device path is missing");
3596 return -EFSCORRUPTED;
3600 if (secs_per_zone > total_sections || !secs_per_zone) {
3601 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3602 secs_per_zone, total_sections);
3603 return -EFSCORRUPTED;
3605 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3606 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3607 (le32_to_cpu(raw_super->extension_count) +
3608 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3609 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3610 le32_to_cpu(raw_super->extension_count),
3611 raw_super->hot_ext_count,
3612 F2FS_MAX_EXTENSION);
3613 return -EFSCORRUPTED;
3616 if (le32_to_cpu(raw_super->cp_payload) >=
3617 (blocks_per_seg - F2FS_CP_PACKS -
3618 NR_CURSEG_PERSIST_TYPE)) {
3619 f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
3620 le32_to_cpu(raw_super->cp_payload),
3621 blocks_per_seg - F2FS_CP_PACKS -
3622 NR_CURSEG_PERSIST_TYPE);
3623 return -EFSCORRUPTED;
3626 /* check reserved ino info */
3627 if (le32_to_cpu(raw_super->node_ino) != 1 ||
3628 le32_to_cpu(raw_super->meta_ino) != 2 ||
3629 le32_to_cpu(raw_super->root_ino) != 3) {
3630 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3631 le32_to_cpu(raw_super->node_ino),
3632 le32_to_cpu(raw_super->meta_ino),
3633 le32_to_cpu(raw_super->root_ino));
3634 return -EFSCORRUPTED;
3637 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
3638 if (sanity_check_area_boundary(sbi, bh))
3639 return -EFSCORRUPTED;
3644 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3646 unsigned int total, fsmeta;
3647 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3648 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3649 unsigned int ovp_segments, reserved_segments;
3650 unsigned int main_segs, blocks_per_seg;
3651 unsigned int sit_segs, nat_segs;
3652 unsigned int sit_bitmap_size, nat_bitmap_size;
3653 unsigned int log_blocks_per_seg;
3654 unsigned int segment_count_main;
3655 unsigned int cp_pack_start_sum, cp_payload;
3656 block_t user_block_count, valid_user_blocks;
3657 block_t avail_node_count, valid_node_count;
3658 unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3661 total = le32_to_cpu(raw_super->segment_count);
3662 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3663 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3665 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3667 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3668 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3670 if (unlikely(fsmeta >= total))
3673 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3674 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3676 if (!f2fs_sb_has_readonly(sbi) &&
3677 unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3678 ovp_segments == 0 || reserved_segments == 0)) {
3679 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3682 user_block_count = le64_to_cpu(ckpt->user_block_count);
3683 segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3684 (f2fs_sb_has_readonly(sbi) ? 1 : 0);
3685 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3686 if (!user_block_count || user_block_count >=
3687 segment_count_main << log_blocks_per_seg) {
3688 f2fs_err(sbi, "Wrong user_block_count: %u",
3693 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3694 if (valid_user_blocks > user_block_count) {
3695 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3696 valid_user_blocks, user_block_count);
3700 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3701 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3702 if (valid_node_count > avail_node_count) {
3703 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3704 valid_node_count, avail_node_count);
3708 main_segs = le32_to_cpu(raw_super->segment_count_main);
3709 blocks_per_seg = sbi->blocks_per_seg;
3711 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3712 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3713 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3716 if (f2fs_sb_has_readonly(sbi))
3719 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3720 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3721 le32_to_cpu(ckpt->cur_node_segno[j])) {
3722 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3724 le32_to_cpu(ckpt->cur_node_segno[i]));
3730 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3731 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3732 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3735 if (f2fs_sb_has_readonly(sbi))
3738 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3739 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3740 le32_to_cpu(ckpt->cur_data_segno[j])) {
3741 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3743 le32_to_cpu(ckpt->cur_data_segno[i]));
3748 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3749 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3750 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3751 le32_to_cpu(ckpt->cur_data_segno[j])) {
3752 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3754 le32_to_cpu(ckpt->cur_node_segno[i]));
3760 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3761 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3763 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3764 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3765 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3766 sit_bitmap_size, nat_bitmap_size);
3770 cp_pack_start_sum = __start_sum_addr(sbi);
3771 cp_payload = __cp_payload(sbi);
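/*
 * The summary blocks live inside the one-segment checkpoint pack: they
 * must start after the head cp block and the cp_payload blocks, and
 * leave room for NR_CURSEG_PERSIST_TYPE summaries plus the tail cp
 * block; hence the bounds checked below.
 */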
3772 if (cp_pack_start_sum < cp_payload + 1 ||
3773 cp_pack_start_sum > blocks_per_seg - 1 -
3774 NR_CURSEG_PERSIST_TYPE) {
3775 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3780 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3781 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3782 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3783 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3784 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3785 le32_to_cpu(ckpt->checksum_offset));
3789 nat_blocks = nat_segs << log_blocks_per_seg;
3790 nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3791 nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
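/*
 * nat_bits holds two bitmaps (full and empty NAT bits), hence the
 * "<< 1", plus an 8-byte checkpoint version, rounded up to whole blocks.
 */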
3792 if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3793 (cp_payload + F2FS_CP_PACKS +
3794 NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3795 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
3796 cp_payload, nat_bits_blocks);
3800 if (unlikely(f2fs_cp_error(sbi))) {
3801 f2fs_err(sbi, "A bug case: need to run fsck");
3807 static void init_sb_info(struct f2fs_sb_info *sbi)
3809 struct f2fs_super_block *raw_super = sbi->raw_super;
3812 sbi->log_sectors_per_block =
3813 le32_to_cpu(raw_super->log_sectors_per_block);
3814 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3815 sbi->blocksize = BIT(sbi->log_blocksize);
3816 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3817 sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
3818 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3819 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3820 sbi->total_sections = le32_to_cpu(raw_super->section_count);
3821 sbi->total_node_count =
3822 (le32_to_cpu(raw_super->segment_count_nat) / 2)
3823 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3824 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3825 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3826 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3827 sbi->cur_victim_sec = NULL_SECNO;
3828 sbi->gc_mode = GC_NORMAL;
3829 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3830 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3831 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3832 sbi->migration_granularity = sbi->segs_per_sec;
3833 sbi->seq_file_ra_mul = MIN_RA_MUL;
3834 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
3835 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
3836 spin_lock_init(&sbi->gc_remaining_trials_lock);
3837 atomic64_set(&sbi->current_atomic_write, 0);
3839 sbi->dir_level = DEF_DIR_LEVEL;
3840 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3841 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3842 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3843 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3844 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3845 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3846 DEF_UMOUNT_DISCARD_TIMEOUT;
3847 clear_sbi_flag(sbi, SBI_NEED_FSCK);
3849 for (i = 0; i < NR_COUNT_TYPE; i++)
3850 atomic_set(&sbi->nr_pages[i], 0);
3852 for (i = 0; i < META; i++)
3853 atomic_set(&sbi->wb_sync_req[i], 0);
3855 INIT_LIST_HEAD(&sbi->s_list);
3856 mutex_init(&sbi->umount_mutex);
3857 init_f2fs_rwsem(&sbi->io_order_lock);
3858 spin_lock_init(&sbi->cp_lock);
3860 sbi->dirty_device = 0;
3861 spin_lock_init(&sbi->dev_lock);
3863 init_f2fs_rwsem(&sbi->sb_lock);
3864 init_f2fs_rwsem(&sbi->pin_sem);
3867 static int init_percpu_info(struct f2fs_sb_info *sbi)
3871 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3875 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
3877 goto err_valid_block;
3879 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3882 goto err_node_block;
3886 percpu_counter_destroy(&sbi->rf_node_block_count);
3888 percpu_counter_destroy(&sbi->alloc_valid_block_count);
3892 #ifdef CONFIG_BLK_DEV_ZONED
3894 struct f2fs_report_zones_args {
3895 struct f2fs_sb_info *sbi;
3896 struct f2fs_dev_info *dev;
3899 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3902 struct f2fs_report_zones_args *rz_args = data;
3903 block_t unusable_blocks = (zone->len - zone->capacity) >>
3904 F2FS_LOG_SECTORS_PER_BLOCK;
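/*
 * A zone's usable capacity may be smaller than its size; the trailing
 * (len - capacity) blocks of each sequential zone are unusable, and
 * f2fs requires that count to be identical across zones.
 */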
3906 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3909 set_bit(idx, rz_args->dev->blkz_seq);
3910 if (!rz_args->sbi->unusable_blocks_per_sec) {
3911 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
3914 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
3915 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity");
3921 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3923 struct block_device *bdev = FDEV(devi).bdev;
3924 sector_t nr_sectors = bdev_nr_sectors(bdev);
3925 struct f2fs_report_zones_args rep_zone_arg;
3929 if (!f2fs_sb_has_blkzoned(sbi))
3932 zone_sectors = bdev_zone_sectors(bdev);
3933 if (!is_power_of_2(zone_sectors)) {
3934 f2fs_err(sbi, "F2FS does not support non-power-of-2 zone sizes");
3938 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3939 SECTOR_TO_BLOCK(zone_sectors))
3941 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
3942 FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
3943 sbi->blocks_per_blkz);
3944 if (nr_sectors & (zone_sectors - 1))
3945 FDEV(devi).nr_blkz++;
3947 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3948 BITS_TO_LONGS(FDEV(devi).nr_blkz)
3949 * sizeof(unsigned long),
3951 if (!FDEV(devi).blkz_seq)
3954 rep_zone_arg.sbi = sbi;
3955 rep_zone_arg.dev = &FDEV(devi);
3957 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3966 * Read the f2fs raw superblock.
3967 * Since we keep two copies of the superblock, read both of them
3968 * to get the first valid one. If either is broken, we pass a
3969 * recovery flag back to the caller.
3971 static int read_raw_super_block(struct f2fs_sb_info *sbi,
3972 struct f2fs_super_block **raw_super,
3973 int *valid_super_block, int *recovery)
3975 struct super_block *sb = sbi->sb;
3977 struct buffer_head *bh;
3978 struct f2fs_super_block *super;
3981 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3985 for (block = 0; block < 2; block++) {
3986 bh = sb_bread(sb, block);
3988 f2fs_err(sbi, "Unable to read %dth superblock",
3995 /* sanity checking of raw super */
3996 err = sanity_check_raw_super(sbi, bh);
3998 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
4006 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
4008 *valid_super_block = block;
4014 /* No valid superblock */
4023 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
4025 struct buffer_head *bh;
4029 if ((recover && f2fs_readonly(sbi->sb)) ||
4030 f2fs_hw_is_readonly(sbi)) {
4031 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
4035 /* we should update superblock crc here */
4036 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
4037 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
4038 offsetof(struct f2fs_super_block, crc));
4039 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
4042 /* write the backup superblock first */
4043 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
4046 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
4049 /* if we are in the recovery path, skip writing the valid superblock */
4053 /* write current valid superblock */
4054 bh = sb_bread(sbi->sb, sbi->valid_super_block);
4057 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
4062 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
4064 unsigned long flags;
4066 spin_lock_irqsave(&sbi->error_lock, flags);
4067 if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
4068 sbi->stop_reason[reason]++;
4069 spin_unlock_irqrestore(&sbi->error_lock, flags);
4072 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
4074 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4075 unsigned long flags;
4078 f2fs_down_write(&sbi->sb_lock);
4080 spin_lock_irqsave(&sbi->error_lock, flags);
4081 if (sbi->error_dirty) {
4082 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4084 sbi->error_dirty = false;
4086 memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
4087 spin_unlock_irqrestore(&sbi->error_lock, flags);
4089 err = f2fs_commit_super(sbi, false);
4091 f2fs_up_write(&sbi->sb_lock);
4093 f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err);
4096 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
4098 unsigned long flags;
4100 spin_lock_irqsave(&sbi->error_lock, flags);
4101 if (!test_bit(flag, (unsigned long *)sbi->errors)) {
4102 set_bit(flag, (unsigned long *)sbi->errors);
4103 sbi->error_dirty = true;
4105 spin_unlock_irqrestore(&sbi->error_lock, flags);
4108 static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
4110 unsigned long flags;
4111 bool need_update = false;
4113 spin_lock_irqsave(&sbi->error_lock, flags);
4114 if (sbi->error_dirty) {
4115 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4117 sbi->error_dirty = false;
4120 spin_unlock_irqrestore(&sbi->error_lock, flags);
4125 static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
4129 f2fs_down_write(&sbi->sb_lock);
4131 if (!f2fs_update_errors(sbi))
4134 err = f2fs_commit_super(sbi, false);
4136 f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d",
4139 f2fs_up_write(&sbi->sb_lock);
4142 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
4144 f2fs_save_errors(sbi, error);
4145 f2fs_record_errors(sbi, error);
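/*
 * Variant for callers that must not write the superblock synchronously
 * (e.g. from an I/O completion path): record the error in memory and
 * defer the on-disk update to the error work item.
 */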
4148 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
4150 f2fs_save_errors(sbi, error);
4152 if (!sbi->error_dirty)
4154 if (!test_bit(error, (unsigned long *)sbi->errors))
4156 schedule_work(&sbi->s_error_work);
4159 static bool system_going_down(void)
4161 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
4162 || system_state == SYSTEM_RESTART;
4165 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
4168 struct super_block *sb = sbi->sb;
4169 bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
4170 bool continue_fs = !shutdown &&
4171 F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
4173 set_ckpt_flags(sbi, CP_ERROR_FLAG);
4175 if (!f2fs_hw_is_readonly(sbi)) {
4176 save_stop_reason(sbi, reason);
4178 if (irq_context && !shutdown)
4179 schedule_work(&sbi->s_error_work);
4181 f2fs_record_stop_reason(sbi);
4185 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
4186 * could panic during 'reboot -f' as the underlying device got already
4187 * disabled.
4189 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
4190 !shutdown && !system_going_down() &&
4191 !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
4192 panic("F2FS-fs (device %s): panic forced after error\n",
4196 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
4198 /* continue filesystem operation if errors=continue */
4199 if (continue_fs || f2fs_readonly(sb))
4202 f2fs_warn(sbi, "Remounting filesystem read-only");
4204 * Make sure updated value of ->s_mount_flags will be visible before
4205 * ->s_flags update.
4207 smp_wmb();
4208 sb->s_flags |= SB_RDONLY;
4211 static void f2fs_record_error_work(struct work_struct *work)
4213 struct f2fs_sb_info *sbi = container_of(work,
4214 struct f2fs_sb_info, s_error_work);
4216 f2fs_record_stop_reason(sbi);
4219 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
4221 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4222 unsigned int max_devices = MAX_DEVICES;
4223 unsigned int logical_blksize;
4224 blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
4227 /* Initialize single device information */
4228 if (!RDEV(0).path[0]) {
4229 if (!bdev_is_zoned(sbi->sb->s_bdev))
4235 * Initialize multi-device information, or single
4236 * zoned block device information.
4238 sbi->devs = f2fs_kzalloc(sbi,
4239 array_size(max_devices,
4240 sizeof(struct f2fs_dev_info)),
4245 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
4246 sbi->aligned_blksize = true;
4248 for (i = 0; i < max_devices; i++) {
4250 FDEV(0).bdev_handle = sbi->sb->s_bdev_handle;
4251 else if (!RDEV(i).path[0])
4254 if (max_devices > 1) {
4255 /* Multi-device mount */
4256 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
4257 FDEV(i).total_segments =
4258 le32_to_cpu(RDEV(i).total_segments);
4260 FDEV(i).start_blk = 0;
4261 FDEV(i).end_blk = FDEV(i).start_blk +
4262 (FDEV(i).total_segments <<
4263 sbi->log_blocks_per_seg) - 1 +
4264 le32_to_cpu(raw_super->segment0_blkaddr);
4266 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
4267 FDEV(i).end_blk = FDEV(i).start_blk +
4268 (FDEV(i).total_segments <<
4269 sbi->log_blocks_per_seg) - 1;
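/*
 * Device block ranges are stacked back to back: each entry spans
 * total_segments << log_blocks_per_seg blocks starting right after the
 * previous device's range.
 */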
4270 FDEV(i).bdev_handle = bdev_open_by_path(
4271 FDEV(i).path, mode, sbi->sb, NULL);
4274 if (IS_ERR(FDEV(i).bdev_handle))
4275 return PTR_ERR(FDEV(i).bdev_handle);
4277 FDEV(i).bdev = FDEV(i).bdev_handle->bdev;
4278 /* to release errored devices */
4279 sbi->s_ndevs = i + 1;
4281 if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
4282 sbi->aligned_blksize = false;
4284 #ifdef CONFIG_BLK_DEV_ZONED
4285 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
4286 !f2fs_sb_has_blkzoned(sbi)) {
4287 f2fs_err(sbi, "Zoned block device feature not enabled");
4290 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
4291 if (init_blkz_info(sbi, i)) {
4292 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
4295 if (max_devices == 1)
4297 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
4299 FDEV(i).total_segments,
4300 FDEV(i).start_blk, FDEV(i).end_blk,
4301 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
4302 "Host-aware" : "Host-managed");
4306 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
4308 FDEV(i).total_segments,
4309 FDEV(i).start_blk, FDEV(i).end_blk);
4312 "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
4316 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
4318 #if IS_ENABLED(CONFIG_UNICODE)
4319 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
4320 const struct f2fs_sb_encodings *encoding_info;
4321 struct unicode_map *encoding;
4322 __u16 encoding_flags;
4324 encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
4325 if (!encoding_info) {
4327 "Encoding requested by superblock is unknown");
4331 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
4332 encoding = utf8_load(encoding_info->version);
4333 if (IS_ERR(encoding)) {
4335 "can't mount with superblock charset: %s-%u.%u.%u "
4336 "not supported by the kernel. flags: 0x%x.",
4337 encoding_info->name,
4338 unicode_major(encoding_info->version),
4339 unicode_minor(encoding_info->version),
4340 unicode_rev(encoding_info->version),
4342 return PTR_ERR(encoding);
4344 f2fs_info(sbi, "Using encoding defined by superblock: "
4345 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4346 unicode_major(encoding_info->version),
4347 unicode_minor(encoding_info->version),
4348 unicode_rev(encoding_info->version),
4351 sbi->sb->s_encoding = encoding;
4352 sbi->sb->s_encoding_flags = encoding_flags;
4355 if (f2fs_sb_has_casefold(sbi)) {
4356 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
4363 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
4365 /* adjust parameters according to the volume size */
4366 if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
4367 if (f2fs_block_unit_discard(sbi))
4368 SM_I(sbi)->dcc_info->discard_granularity =
4369 MIN_DISCARD_GRANULARITY;
4370 if (!f2fs_lfs_mode(sbi))
4371 SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
4372 BIT(F2FS_IPU_HONOR_OPU_WRITE);
4375 sbi->readdir_ra = true;
4378 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
4380 struct f2fs_sb_info *sbi;
4381 struct f2fs_super_block *raw_super;
4384 bool skip_recovery = false, need_fsck = false;
4385 char *options = NULL;
4386 int recovery, i, valid_super_block;
4387 struct curseg_info *seg_i;
4390 bool quota_enabled = false;
4396 valid_super_block = -1;
4399 /* allocate memory for f2fs-specific super block info */
4400 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
4406 /* initialize locks within allocated memory */
4407 init_f2fs_rwsem(&sbi->gc_lock);
4408 mutex_init(&sbi->writepages);
4409 init_f2fs_rwsem(&sbi->cp_global_sem);
4410 init_f2fs_rwsem(&sbi->node_write);
4411 init_f2fs_rwsem(&sbi->node_change);
4412 spin_lock_init(&sbi->stat_lock);
4413 init_f2fs_rwsem(&sbi->cp_rwsem);
4414 init_f2fs_rwsem(&sbi->quota_sem);
4415 init_waitqueue_head(&sbi->cp_wait);
4416 spin_lock_init(&sbi->error_lock);
4418 for (i = 0; i < NR_INODE_TYPE; i++) {
4419 INIT_LIST_HEAD(&sbi->inode_list[i]);
4420 spin_lock_init(&sbi->inode_lock[i]);
4422 mutex_init(&sbi->flush_lock);
4424 /* Load the checksum driver */
4425 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
4426 if (IS_ERR(sbi->s_chksum_driver)) {
4427 f2fs_err(sbi, "Cannot load crc32 driver.");
4428 err = PTR_ERR(sbi->s_chksum_driver);
4429 sbi->s_chksum_driver = NULL;
4433 /* set a block size */
4434 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
4435 f2fs_err(sbi, "unable to set blocksize");
4439 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
4444 sb->s_fs_info = sbi;
4445 sbi->raw_super = raw_super;
4447 INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
4448 memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
4449 memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
4451 /* precompute checksum seed for metadata */
4452 if (f2fs_sb_has_inode_chksum(sbi))
4453 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
4454 sizeof(raw_super->uuid));
4456 default_options(sbi, false);
4457 /* parse mount options */
4458 options = kstrdup((const char *)data, GFP_KERNEL);
4459 if (data && !options) {
4464 err = parse_options(sb, options, false);
4468 sb->s_maxbytes = max_file_blocks(NULL) <<
4469 le32_to_cpu(raw_super->log_blocksize);
4470 sb->s_max_links = F2FS_LINK_MAX;
4472 err = f2fs_setup_casefold(sbi);
4477 sb->dq_op = &f2fs_quota_operations;
4478 sb->s_qcop = &f2fs_quotactl_ops;
4479 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4481 if (f2fs_sb_has_quota_ino(sbi)) {
4482 for (i = 0; i < MAXQUOTAS; i++) {
4483 if (f2fs_qf_ino(sbi->sb, i))
4484 sbi->nquota_files++;
4489 sb->s_op = &f2fs_sops;
4490 #ifdef CONFIG_FS_ENCRYPTION
4491 sb->s_cop = &f2fs_cryptops;
4493 #ifdef CONFIG_FS_VERITY
4494 sb->s_vop = &f2fs_verityops;
4496 sb->s_xattr = f2fs_xattr_handlers;
4497 sb->s_export_op = &f2fs_export_ops;
4498 sb->s_magic = F2FS_SUPER_MAGIC;
4499 sb->s_time_gran = 1;
4500 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4501 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
4502 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
4503 sb->s_iflags |= SB_I_CGROUPWB;
4505 /* init f2fs-specific super block info */
4506 sbi->valid_super_block = valid_super_block;
4508 /* disallow all the data/node/meta page writes */
4509 set_sbi_flag(sbi, SBI_POR_DOING);
4511 err = f2fs_init_write_merge_io(sbi);
4517 err = f2fs_init_iostat(sbi);
4521 err = init_percpu_info(sbi);
4525 if (F2FS_IO_ALIGNED(sbi)) {
4526 sbi->write_io_dummy =
4527 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4528 if (!sbi->write_io_dummy) {
4534 /* init per sbi slab cache */
4535 err = f2fs_init_xattr_caches(sbi);
4538 err = f2fs_init_page_array_cache(sbi);
4540 goto free_xattr_cache;
4542 /* get an inode for meta space */
4543 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4544 if (IS_ERR(sbi->meta_inode)) {
4545 f2fs_err(sbi, "Failed to read F2FS meta data inode");
4546 err = PTR_ERR(sbi->meta_inode);
4547 goto free_page_array_cache;
4550 err = f2fs_get_valid_checkpoint(sbi);
4552 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4553 goto free_meta_inode;
4556 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4557 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4558 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4559 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4560 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4563 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4564 set_sbi_flag(sbi, SBI_NEED_FSCK);
4566 /* Initialize device list */
4567 err = f2fs_scan_devices(sbi);
4569 f2fs_err(sbi, "Failed to find devices");
4573 err = f2fs_init_post_read_wq(sbi);
4575 f2fs_err(sbi, "Failed to initialize post read workqueue");
4579 sbi->total_valid_node_count =
4580 le32_to_cpu(sbi->ckpt->valid_node_count);
4581 percpu_counter_set(&sbi->total_valid_inode_count,
4582 le32_to_cpu(sbi->ckpt->valid_inode_count));
4583 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4584 sbi->total_valid_block_count =
4585 le64_to_cpu(sbi->ckpt->valid_block_count);
4586 sbi->last_valid_block_count = sbi->total_valid_block_count;
4587 sbi->reserved_blocks = 0;
4588 sbi->current_reserved_blocks = 0;
4589 limit_reserve_root(sbi);
4590 adjust_unusable_cap_perc(sbi);
4592 f2fs_init_extent_cache_info(sbi);
4594 f2fs_init_ino_entry_info(sbi);
4596 f2fs_init_fsync_node_info(sbi);
4598 /* setup checkpoint request control and start checkpoint issue thread */
4599 f2fs_init_ckpt_req_control(sbi);
4600 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4601 test_opt(sbi, MERGE_CHECKPOINT)) {
4602 err = f2fs_start_ckpt_thread(sbi);
4605 "Failed to start F2FS issue_checkpoint_thread (%d)",
4607 goto stop_ckpt_thread;
4611 /* setup f2fs internal modules */
4612 err = f2fs_build_segment_manager(sbi);
4614 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4618 err = f2fs_build_node_manager(sbi);
4620 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4625 err = adjust_reserved_segment(sbi);
4629 /* For write statistics */
4630 sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4632 /* Read accumulated write IO statistics if they exist */
4633 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4634 if (__exist_node_summaries(sbi))
4635 sbi->kbytes_written =
4636 le64_to_cpu(seg_i->journal->info.kbytes_written);
4638 f2fs_build_gc_manager(sbi);
4640 err = f2fs_build_stats(sbi);
4644 /* get an inode for node space */
4645 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4646 if (IS_ERR(sbi->node_inode)) {
4647 f2fs_err(sbi, "Failed to read node inode");
4648 err = PTR_ERR(sbi->node_inode);

        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
                f2fs_err(sbi, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
        if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
                        !root->i_size || !root->i_nlink) {
                iput(root);
                err = -EINVAL;
                goto free_node_inode;
        }

        sb->s_root = d_make_root(root); /* allocate root dentry */
        if (!sb->s_root) {
                err = -ENOMEM;
                goto free_node_inode;
        }
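
        /* the compress inode backs the compressed-page cache (COMPRESS_CACHE) */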
        err = f2fs_init_compress_inode(sbi);
        if (err)
                goto free_root_inode;

        err = f2fs_register_sysfs(sbi);
        if (err)
                goto free_compress_inode;

#ifdef CONFIG_QUOTA
        /* Enable quota usage during mount */
        if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
                err = f2fs_enable_quotas(sb);
                if (err)
                        f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
        }

        quota_enabled = f2fs_recover_quota_begin(sbi);
#endif

        /* if there are any orphan inodes, free them */
        err = f2fs_recover_orphan_inodes(sbi);
        if (err)
                goto free_meta;

        if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
                goto reset_checkpoint;

        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
                        !test_opt(sbi, NORECOVERY)) {
                /*
                 * mount should be failed, when device has readonly mode, and
                 * previous checkpoint was not done by clean system shutdown.
                 */
                if (f2fs_hw_is_readonly(sbi)) {
                        if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
                                err = f2fs_recover_fsync_data(sbi, true);
                                if (err > 0) {
                                        err = -EROFS;
                                        f2fs_err(sbi, "Need to recover fsync data, but "
                                                "write access unavailable, please try "
                                                "mount w/ disable_roll_forward or norecovery");
                                }
                                if (err < 0)
                                        goto free_meta;
                        }
                        f2fs_info(sbi, "write access unavailable, skipping recovery");
                        goto reset_checkpoint;
                }

                if (need_fsck)
                        set_sbi_flag(sbi, SBI_NEED_FSCK);

                if (skip_recovery)
                        goto reset_checkpoint;

                err = f2fs_recover_fsync_data(sbi, false);
                if (err < 0) {
                        if (err != -ENOMEM)
                                skip_recovery = true;
                        need_fsck = true;
                        f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
                                 err);
                        goto free_meta;
                }
        } else {
                err = f2fs_recover_fsync_data(sbi, true);

                if (!f2fs_readonly(sb) && err > 0) {
                        err = -EINVAL;
                        f2fs_err(sbi, "Need to recover fsync data");
                        goto free_meta;
                }
        }

#ifdef CONFIG_QUOTA
        f2fs_recover_quota_end(sbi, quota_enabled);
#endif

        /*
         * If the f2fs is not readonly and fsync data recovery succeeds,
         * check zoned block devices' write pointer consistency.
         */
        if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
                err = f2fs_check_write_pointer(sbi);
                if (err)
                        goto free_meta;
        }
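
        /*
         * reset_checkpoint is also reached directly when checkpointing was
         * left disabled or the device is read-only and recovery is skipped.
         */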
reset_checkpoint:
        f2fs_init_inmem_curseg(sbi);

        /* f2fs_recover_fsync_data() cleared this already */
        clear_sbi_flag(sbi, SBI_POR_DOING);

        if (test_opt(sbi, DISABLE_CHECKPOINT)) {
                err = f2fs_disable_checkpoint(sbi);
                if (err)
                        goto sync_free_meta;
        } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
                f2fs_enable_checkpoint(sbi);
        }

        /*
         * If filesystem is not mounted as read-only then
         * do start the gc_thread.
         */
        if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
                test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
                /* After POR, we can run background GC thread.*/
                err = f2fs_start_gc_thread(sbi);
                if (err)
                        goto sync_free_meta;
        }
        kvfree(options);

        /* recover broken superblock */
        if (recovery) {
                err = f2fs_commit_super(sbi, true);
                f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
                        sbi->valid_super_block ? 1 : 2, err);
        }
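
        /* make this sb visible to the f2fs-wide shrinker */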
        f2fs_join_shrinker(sbi);

        f2fs_tuning_parameters(sbi);

        f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
                    cur_cp_version(F2FS_CKPT(sbi)));
        f2fs_update_time(sbi, CP_TIME);
        f2fs_update_time(sbi, REQ_TIME);
        clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
        return 0;

sync_free_meta:
        /* safe to flush all the data */
        sync_filesystem(sbi->sb);
        retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
        f2fs_truncate_quota_inode_pages(sb);
        if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
                f2fs_quota_off_umount(sbi->sb);
#endif
        /*
         * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
         * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
         * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
         * falls into an infinite loop in f2fs_sync_meta_pages().
         */
        truncate_inode_pages_final(META_MAPPING(sbi));
        /* evict some inodes being cached by GC */
        evict_inodes(sb);
        f2fs_unregister_sysfs(sbi);
free_compress_inode:
        f2fs_destroy_compress_inode(sbi);
free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
free_node_inode:
        f2fs_release_ino_entry(sbi, true);
        truncate_inode_pages_final(NODE_MAPPING(sbi));
        iput(sbi->node_inode);
        sbi->node_inode = NULL;
free_stats:
        f2fs_destroy_stats(sbi);
free_nm:
        /* stop discard thread before destroying node manager */
        f2fs_stop_discard_thread(sbi);
        f2fs_destroy_node_manager(sbi);
free_sm:
        f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
        f2fs_stop_ckpt_thread(sbi);
        /* flush s_error_work before sbi destroy */
        flush_work(&sbi->s_error_work);
        f2fs_destroy_post_read_wq(sbi);
free_devices:
        destroy_device_list(sbi);
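
        /*
         * Marking the meta inode bad keeps the final iput() from trying to
         * write anything back through the half-torn-down superblock.
         */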
free_meta_inode:
        make_bad_inode(sbi->meta_inode);
        iput(sbi->meta_inode);
        sbi->meta_inode = NULL;
free_page_array_cache:
        f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
        f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
        mempool_destroy(sbi->write_io_dummy);
free_percpu:
        destroy_percpu_info(sbi);
free_iostat:
        f2fs_destroy_iostat(sbi);
free_bio_info:
        for (i = 0; i < NR_PAGE_TYPE; i++)
                kvfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
        utf8_unload(sb->s_encoding);
        sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
        for (i = 0; i < MAXQUOTAS; i++)
                kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
        fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
        kvfree(options);
free_sb_buf:
        kfree(raw_super);
free_sbi:
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi);

        /* give only one another chance */
        if (retry_cnt > 0 && skip_recovery) {
                retry_cnt--;
                shrink_dcache_sb(sb);
                goto try_onemore;
        }
        return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
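
/*
 * Unlike plain kill_block_super(), f2fs first stops its background
 * threads and, if the sb is dirty or was not cleanly checkpointed,
 * writes a final CP_UMOUNT checkpoint.
 */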
static void kill_f2fs_super(struct super_block *sb)
{
        if (sb->s_root) {
                struct f2fs_sb_info *sbi = F2FS_SB(sb);

                set_sbi_flag(sbi, SBI_IS_CLOSE);
                f2fs_stop_gc_thread(sbi);
                f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
                /*
                 * latter evict_inode() can bypass checking and invalidating
                 * compress inode cache.
                 */
                if (test_opt(sbi, COMPRESS_CACHE))
                        truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

                if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
                                !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
                        struct cp_control cpc = {
                                .reason = CP_UMOUNT,
                        };

                        stat_inc_cp_call_count(sbi, TOTAL_CALL);
                        f2fs_write_checkpoint(sbi, &cpc);
                }

                if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
                        sb->s_flags &= ~SB_RDONLY;
        }
        kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "f2fs",
        .mount          = f2fs_mount,
        .kill_sb        = kill_f2fs_super,
        .fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
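
/*
 * SLAB_RECLAIM_ACCOUNT marks cached f2fs inodes as reclaimable to the VM;
 * SLAB_ACCOUNT charges the allocations to memory cgroups.
 */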
static int __init init_inodecache(void)
{
        f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info), 0,
                        SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
        return f2fs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
        int err;

        if (PAGE_SIZE != F2FS_BLKSIZE) {
                printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
                                PAGE_SIZE, F2FS_BLKSIZE);
                return -EINVAL;
        }

        err = init_inodecache();
        if (err)
                goto fail;
        err = f2fs_create_node_manager_caches();
        if (err)
                goto free_inodecache;
        err = f2fs_create_segment_manager_caches();
        if (err)
                goto free_node_manager_caches;
        err = f2fs_create_checkpoint_caches();
        if (err)
                goto free_segment_manager_caches;
        err = f2fs_create_recovery_cache();
        if (err)
                goto free_checkpoint_caches;
        err = f2fs_create_extent_cache();
        if (err)
                goto free_recovery_cache;
        err = f2fs_create_garbage_collection_cache();
        if (err)
                goto free_extent_cache;
        err = f2fs_init_sysfs();
        if (err)
                goto free_garbage_collection_cache;
        err = f2fs_init_shrinker();
        if (err)
                goto free_sysfs;
        err = register_filesystem(&f2fs_fs_type);
        if (err)
                goto free_shrinker;
        f2fs_create_root_stats();
        err = f2fs_init_post_read_processing();
        if (err)
                goto free_root_stats;
        err = f2fs_init_iostat_processing();
        if (err)
                goto free_post_read;
        err = f2fs_init_bio_entry_cache();
        if (err)
                goto free_iostat;
        err = f2fs_init_bioset();
        if (err)
                goto free_bio_entry_cache;
        err = f2fs_init_compress_mempool();
        if (err)
                goto free_bioset;
        err = f2fs_init_compress_cache();
        if (err)
                goto free_compress_mempool;
        err = f2fs_create_casefold_cache();
        if (err)
                goto free_compress_cache;
        return 0;
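
        /* error unwinding: each label undoes exactly the steps above it, in reverse */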
free_compress_cache:
        f2fs_destroy_compress_cache();
free_compress_mempool:
        f2fs_destroy_compress_mempool();
free_bioset:
        f2fs_destroy_bioset();
free_bio_entry_cache:
        f2fs_destroy_bio_entry_cache();
free_iostat:
        f2fs_destroy_iostat_processing();
free_post_read:
        f2fs_destroy_post_read_processing();
free_root_stats:
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
free_shrinker:
        f2fs_exit_shrinker();
free_sysfs:
        f2fs_exit_sysfs();
free_garbage_collection_cache:
        f2fs_destroy_garbage_collection_cache();
free_extent_cache:
        f2fs_destroy_extent_cache();
free_recovery_cache:
        f2fs_destroy_recovery_cache();
free_checkpoint_caches:
        f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
        f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
        f2fs_destroy_node_manager_caches();
free_inodecache:
        destroy_inodecache();
fail:
        return err;
}

static void __exit exit_f2fs_fs(void)
{
        f2fs_destroy_casefold_cache();
        f2fs_destroy_compress_cache();
        f2fs_destroy_compress_mempool();
        f2fs_destroy_bioset();
        f2fs_destroy_bio_entry_cache();
        f2fs_destroy_iostat_processing();
        f2fs_destroy_post_read_processing();
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        f2fs_exit_shrinker();
        f2fs_exit_sysfs();
        f2fs_destroy_garbage_collection_cache();
        f2fs_destroy_extent_cache();
        f2fs_destroy_recovery_cache();
        f2fs_destroy_checkpoint_caches();
        f2fs_destroy_segment_manager_caches();
        f2fs_destroy_node_manager_caches();
        destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32");