// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

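/* Mark the inode dirty for writeback unless it is brand new or already accounted as dirty. */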
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;
	if (f2fs_inode_dirtied(inode, sync))
		return;
	mark_inode_dirty_sync(inode);
}

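/* Propagate the on-disk F2FS_*_FL flags into the generic VFS S_* inode flags. */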
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

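/* Read the device number of a special inode from the raw inode's address slots. */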
static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (addr[0])
			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
		else
			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
	}
}

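/* Store i_rdev of a device inode into the raw inode, preferring the old encoding when it fits. */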
static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
			addr[1] = 0;
		} else {
			addr[0] = 0;
			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
			addr[2] = 0;
		}
	}
}

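/* If any inline data word is non-zero, restore FI_DATA_EXIST and re-dirty the inode page. */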
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

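/* Return true only when this node page is an inode that carries an inode checksum field. */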
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;
	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;
	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;
	return true;
}

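/* Compute the inode checksum over the raw inode, treating the checksum field itself as zero. */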
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;
	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

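/* Validate on-disk inode fields; on any inconsistency, request fsck and return false. */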
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

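/* Snapshot the current timestamps so later updates can tell whether the on-disk copy is stale. */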
static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode->i_ctime;
	fi->i_disk_time[2] = inode->i_mtime;
	fi->i_disk_time[3] = fi->i_crtime;
}

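/* Read the raw inode from its node page and populate the VFS inode and f2fs_inode_info. */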
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {
		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, node_page);

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);

	if (!sanity_check_extent_cache(inode)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}

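/* Look up or instantiate an inode; meta inodes skip do_read_inode() and only set up mappings. */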
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

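/* Same as f2fs_iget(), but wait and retry when the lookup fails with -ENOMEM. */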
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

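/* Copy the in-memory inode state back into its raw inode in @node_page and dirty the page. */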
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

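/* Fetch the inode's node page and sync the inode into it, retrying on transient failures. */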
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

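/* ->write_inode callback: flush the inode to its node page and rebalance dirty nodes if needed. */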
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when we are running out of free
	 * sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if it was truncated
			 * previously, truncation of the inode node will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error condition,
		 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
		 * In that case, f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/* clear nlink of inode in order to release its resources immediately */
	clear_nlink(inode);

	/*
	 * we must call this to avoid inode being remained as dirty, resulting
	 * in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * followed by sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}
out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}