// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term]	F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
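/*
 * Recovery makes two passes over the warm node chain written after the
 * last checkpoint: find_fsync_dnodes() below collects every fsync-marked
 * inode, then recover_data() replays inode updates, dentries and data
 * indices for the collected inodes (see f2fs_recover_fsync_data()).
 */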
static struct kmem_cache *fsync_entry_slab;
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
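/* Find the recovery entry for @ino on @head, or NULL if it is not listed. */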
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
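/*
 * Grab the inode for @ino, charge it to quota, and queue it on @head;
 * returns an ERR_PTR() on failure, e.g. -ENOENT while the on-disk inode
 * block has not been recovered yet.
 */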
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
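/*
 * Rebuild an f2fs_filename from the name stored in the raw inode, so the
 * dentry can be looked up during recovery; the dirent hash is recomputed,
 * going through f2fs_init_casefolded_name() for casefolded directories.
 */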
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}
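/*
 * Re-link the recovered inode into its parent (i_pino): a stale dentry
 * that points at a different inode is deleted first, and the name is
 * re-added if no matching entry exists.
 */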
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
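/*
 * If the on-disk uid/gid differ from the in-memory inode, transfer the
 * quota charges before recover_inode() overwrites the ids.
 */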
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
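/*
 * Scenario 1 above: an fsynced dnode follows a stale inode block, so the
 * latest metadata (mode, uid/gid, size, timestamps, flags) is copied from
 * the fsynced node page into the in-memory inode.
 */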
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}
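/*
 * Pass #1: walk the node chain that starts right after the last
 * checkpointed warm node block and collect every fsync-marked inode
 * into @head; @check_only suppresses recovery of dentry inode pages.
 */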
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
				blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
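/*
 * Before replaying a data block into @blkaddr, look up its previous owner
 * through the segment summary and truncate that stale index, so the same
 * block never stays referenced from two nodes.
 */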
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset, ofs_in_node, max_addrs;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	ofs_in_node = le16_to_cpu(sum.ofs_in_node);

	max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
	if (ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
			ofs_in_node, dn->inode->i_ino, nid, max_addrs);
		return -EFSCORRUPTED;
	}

	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = ofs_in_node;
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = ofs_in_node;
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
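/*
 * Replay one fsynced dnode in three steps: recover xattrs, then inline
 * data, then copy each valid data block address from @page into the
 * current dnode.
 */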
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			do {
				err = f2fs_reserve_new_block(&dn);
				if (err == -ENOSPC) {
					f2fs_bug_on(sbi, 1);
					break;
				}
			} while (err &&
				IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
			if (err)
				goto err;
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				do {
					err = f2fs_reserve_new_block(&dn);
					if (err == -ENOSPC) {
						f2fs_bug_on(sbi, 1);
						break;
					}
				} while (err &&
					IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			if (f2fs_is_valid_blkaddr(sbi, dest,
					DATA_GENERIC_ENHANCE_UPDATE)) {
				f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
					dest, inode->i_ino, dn.ofs_in_node);
				err = -EFSCORRUPTED;
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}
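/*
 * Pass #2: walk the node chain again and, for each inode collected by
 * find_fsync_dnodes(), replay inode updates, the last dentry block, and
 * the data indices of every fsynced dnode.
 */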
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
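/*
 * Mount-time entry point for roll forward recovery. With @check_only set,
 * it only reports whether there is data to recover (returns 1 if so);
 * otherwise it replays the fsynced data and, on success, writes a
 * CP_RECOVERY checkpoint.
 */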
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		if (!err)
			err = f2fs_check_write_pointer(sbi);
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
int __init f2fs_create_recovery_cache(void)
{
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_recovery_cache(void)
{
	kmem_cache_destroy(fsync_entry_slab);
}