/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

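/*
 * For example, scenario 1 can arise from a chmod followed by an fsync
 * after the last checkpoint: the warm node log then holds both a newer
 * inode(x) and the fsync'ed dnode(F), so recovery must replay the inode
 * update (see recover_inode() below) in addition to the data indices.
 */
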
static struct kmem_cache *fsync_entry_slab;

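/*
 * Estimate whether the blocks needed by roll forward recovery still fit:
 * compare the space valid as of the last checkpoint plus everything
 * allocated since then against the total user block count.
 */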
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

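/* Find the entry for @ino in the list built up during recovery, if any. */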
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

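/*
 * Grab a reference to the inode behind @ino and append a new entry for it
 * to @head; returns an ERR_PTR when the inode cannot be read.
 */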
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
					struct list_head *head, nid_t ino)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

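/* Release the inode reference taken by add_fsync_inode() and free the entry. */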
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

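/*
 * Re-link @inode under its parent directory using the name recorded in the
 * fsync'ed inode page. An existing entry with the same name but a different
 * ino is deleted (via the orphan list) before the link is re-added.
 */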
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

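/*
 * Scenario 1 above: bring the in-memory inode up to date from the inode
 * block found in the node log (mode, uid/gid, size and timestamps).
 */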
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

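/*
 * Step #1 of recovery: scan the warm node log from the position recorded
 * in the checkpoint and collect every inode that owns a fsync-marked dnode.
 */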
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page));
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

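/*
 * The target block at @blkaddr may still be referenced by a stale index in
 * an older node. Look up the owner through the segment summary and, if it
 * still points at @blkaddr, truncate that index so the block can be reused.
 */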
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock temporarily, but keep its
	 * reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

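/*
 * Replay a single fsync'ed dnode against @inode: restore xattrs and inline
 * data first, then walk every data index in @page and make the on-disk
 * block addresses match what was fsync'ed.
 */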
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold log.
		 * But, we should keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			inode->i_ino, ofs_of_node(dn.node_page),
			ofs_of_node(page));
		err = -EFAULT;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

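/*
 * Step #2 of recovery: walk the warm node log again and replay every dnode
 * that belongs to an inode collected by find_fsync_dnodes().
 */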
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

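/*
 * Mount-time entry point for roll forward recovery. When @check_only is
 * set, only step #1 runs and a positive return value reports that fsync'ed
 * data exists beyond the last checkpoint.
 */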
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	block_t blkaddr;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}