// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
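
/*
 * Read fault handler for mmap'ed files: defer to the generic
 * filemap_fault() and account the faulted page as application
 * mapped-read I/O when the page was locked in.
 */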
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
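
/*
 * Write fault handler: make the faulted page writable. Allocates the
 * backing block if needed (compressed clusters are looked up first so
 * no block is allocated inside an existing cluster), waits for any
 * in-flight writeback, zeroes the part of the page beyond i_size, and
 * dirties the page.
 */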
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* must be done outside of any locked page */
	f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
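
/*
 * Decide whether fsync() on this inode must trigger a full checkpoint
 * rather than the cheaper roll-forward (node-chain) path. Returns the
 * first matching reason, or CP_NO_NEEDED when roll-forward recovery is
 * sufficient.
 */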
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* but we must not overlook any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
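
/*
 * Core fsync implementation. Writes back dirty data pages, then either
 * issues a full checkpoint (when need_do_checkpoint() demands one) or
 * persists the node pages required for roll-forward recovery and
 * finally flushes the device cache.
 */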
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * for the OPU case, during fsync(), the node can be persisted
		 * before the data when the lower device doesn't support a
		 * write barrier, resulting in data corruption after SPO.
		 * So for strict fsync mode, force atomic write semantics to
		 * keep the write order among data/node and the last node, to
		 * avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks in the chain.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
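
/*
 * Walk the dnode blocks from @offset looking for the next data block or
 * hole, as requested by @whence (SEEK_DATA/SEEK_HOLE), using
 * __found_offset() to classify each block address.
 */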
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
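
/*
 * Free @count block address slots starting at dn->ofs_in_node: clear
 * the on-disk block addresses, invalidate the blocks, and keep the
 * compressed-block accounting, extent cache, and valid block count in
 * sync.
 */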
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts at a cluster boundary */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddrs in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
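
/*
 * Truncate all blocks from the block-aligned position derived from
 * @from: inline data is truncated in place, the partially truncated
 * dnode is handled first, the remaining node blocks are freed, and the
 * tail of the last page is zeroed out.
 */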
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned truncation is
	 * supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct writes are not allowed after its
	 * compressed blocks have been released, but they should be allowed
	 * again once the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = f2fs_dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write() so
			 * that an inode carrying the inline flag never grows
			 * beyond the inline_data capacity.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
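
/*
 * Implement FALLOC_FL_PUNCH_HOLE: zero the partial head/tail pages via
 * fill_zero() and drop all whole blocks in between through
 * f2fs_truncate_hole(), with GC and page-cache access locked out.
 */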
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
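
/*
 * Exchange up to @len blocks between the @src and @dst positions in
 * chunks of at most four dnode blocks: read out the source block
 * addresses, clone or replace them at the destination, and roll the
 * source back on failure.
 */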
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(inode->i_mapping);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(inode->i_mapping);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	filemap_invalidate_unlock(inode->i_mapping);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
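
/*
 * Zero the blocks in [start, end) of the dnode: reserve new blocks for
 * holes and reset already-allocated blocks to NEW_ADDR, invalidating
 * their old addresses, then refresh the extent cache for the range.
 */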
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee that the entire
		 * block range was allocated.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}

		if (dn->data_blkaddr == NEW_ADDR)
			continue;

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			break;
		}

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		dn->data_blkaddr = NEW_ADDR;
		f2fs_set_data_blkaddr(dn);
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(mapping);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				filemap_invalidate_unlock(mapping);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			filemap_invalidate_unlock(mapping);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
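
/*
 * Implement FALLOC_FL_INSERT_RANGE: truncate to i_size, shift all
 * blocks from the insert point towards the end of the file (working
 * backwards so nothing is overwritten), and grow i_size by @len.
 */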
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	filemap_invalidate_lock(mapping);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	filemap_invalidate_unlock(mapping);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(mapping);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	filemap_invalidate_unlock(mapping);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(mapping);
	filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	filemap_invalidate_unlock(mapping);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
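
/*
 * Implement the plain fallocate (and FALLOC_FL_KEEP_SIZE) case: map and
 * preallocate blocks for the requested range. Pinned files are expanded
 * section by section from the cold-data-pinned log.
 */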
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	ret = file_modified(file);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file() is called on every close. So we should
	 * not drop any in-memory pages on a close issued by another
	 * process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close their files. Since this must happen
	 * before the file lock is dropped, it needs to be done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
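
/*
 * Apply a new set of on-disk i_flags, rejecting combinations that are
 * unsupported (casefold without the feature, COMPR together with
 * NOCOMP) and refusing to toggle compression on non-empty regular
 * files.
 */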
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */
	iflags &= mask;

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		} else {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
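
/*
 * F2FS_IOC_START_ATOMIC_WRITE: flush the current dirty pages, register
 * the inode on the atomic-file list, and mark it FI_ATOMIC_FILE so that
 * subsequent writes are staged in memory until commit.
 */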
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EINVAL;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Must wait for end_io so that F2FS_WB_CP_DATA is counted
	 * correctly by f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode to inmem_list first and then set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
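
/*
 * Defragment [range->start, range->start + range->len): detect whether
 * the range is actually fragmented, then re-dirty the affected pages a
 * segment's worth at a time so that writeback reallocates them
 * contiguously.
 */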
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are contiguous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			set_page_private_gcing(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
2697 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2699 struct inode *inode = file_inode(filp);
2700 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2701 struct f2fs_defragment range;
2704 if (!capable(CAP_SYS_ADMIN))
2707 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2710 if (f2fs_readonly(sbi->sb))
2713 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2717 /* verify alignment of offset & size */
2718 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2721 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2722 max_file_blocks(inode)))
2725 err = mnt_want_write_file(filp);
2729 err = f2fs_defragment_range(sbi, filp, &range);
2730 mnt_drop_write_file(filp);
2732 f2fs_update_time(sbi, REQ_TIME);
2736 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
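/*
 * Illustrative userspace sketch ('fd' and the 64 MiB range are
 * assumptions): F2FS_IOC_DEFRAGMENT is _IOWR, so on success the kernel
 * writes back into the struct the byte count it actually redirtied
 * (range.len on return, as computed above).
 *
 *	struct f2fs_defragment df = {
 *		.start = 0,
 *		.len = 64ULL << 20,
 *	};
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) == 0)
 *		printf("queued %llu bytes for defragmentation\n",
 *		       (unsigned long long)df.len);
 */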
2743 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2744 struct file *file_out, loff_t pos_out, size_t len)
2746 struct inode *src = file_inode(file_in);
2747 struct inode *dst = file_inode(file_out);
2748 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2749 size_t olen = len, dst_max_i_size = 0;
2753 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2754 src->i_sb != dst->i_sb)
2757 if (unlikely(f2fs_readonly(src->i_sb)))
2760 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2763 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2766 if (pos_out < 0 || pos_in < 0)
2770 if (pos_in == pos_out)
2772 if (pos_out > pos_in && pos_out < pos_in + len)
2779 if (!inode_trylock(dst))
2784 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2787 olen = len = src->i_size - pos_in;
2788 if (pos_in + len == src->i_size)
2789 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2795 dst_osize = dst->i_size;
2796 if (pos_out + olen > dst->i_size)
2797 dst_max_i_size = pos_out + olen;
2799 /* verify the end result is block aligned */
2800 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2801 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2802 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2805 ret = f2fs_convert_inline_inode(src);
2809 ret = f2fs_convert_inline_inode(dst);
2813 /* write out all dirty pages from offset */
2814 ret = filemap_write_and_wait_range(src->i_mapping,
2815 pos_in, pos_in + len);
2819 ret = filemap_write_and_wait_range(dst->i_mapping,
2820 pos_out, pos_out + len);
2824 f2fs_balance_fs(sbi, true);
2826 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2829 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2834 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2835 pos_out >> F2FS_BLKSIZE_BITS,
2836 len >> F2FS_BLKSIZE_BITS, false);
2840 f2fs_i_size_write(dst, dst_max_i_size);
2841 else if (dst_osize != dst->i_size)
2842 f2fs_i_size_write(dst, dst_osize);
2844 f2fs_unlock_op(sbi);
2847 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2849 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2858 static int __f2fs_ioc_move_range(struct file *filp,
2859 struct f2fs_move_range *range)
2864 if (!(filp->f_mode & FMODE_READ) ||
2865 !(filp->f_mode & FMODE_WRITE))
2868 dst = fdget(range->dst_fd);
2872 if (!(dst.file->f_mode & FMODE_WRITE)) {
2877 err = mnt_want_write_file(filp);
2881 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2882 range->pos_out, range->len);
2884 mnt_drop_write_file(filp);
2890 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2892 struct f2fs_move_range range;
2894 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2897 return __f2fs_ioc_move_range(filp, &range);
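/*
 * Illustrative userspace sketch ('fd_in', 'fd_out' and the offsets are
 * assumptions): both descriptors must sit on the same f2fs mount, and the
 * positions/length must be block-aligned as enforced in
 * f2fs_move_file_range() above.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = fd_out,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1ULL << 20,
 *	};
 *	if (ioctl(fd_in, F2FS_IOC_MOVE_RANGE, &mr) < 0)
 *		perror("F2FS_IOC_MOVE_RANGE");
 */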
2900 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2902 struct inode *inode = file_inode(filp);
2903 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2904 struct sit_info *sm = SIT_I(sbi);
2905 unsigned int start_segno = 0, end_segno = 0;
2906 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2907 struct f2fs_flush_device range;
2910 if (!capable(CAP_SYS_ADMIN))
2913 if (f2fs_readonly(sbi->sb))
2916 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2919 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2923 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2924 __is_large_section(sbi)) {
2925 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2926 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2930 ret = mnt_want_write_file(filp);
2934 if (range.dev_num != 0)
2935 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2936 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2938 start_segno = sm->last_victim[FLUSH_DEVICE];
2939 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2940 start_segno = dev_start_segno;
2941 end_segno = min(start_segno + range.segments, dev_end_segno);
2943 while (start_segno < end_segno) {
2944 if (!down_write_trylock(&sbi->gc_lock)) {
2948 sm->last_victim[GC_CB] = end_segno + 1;
2949 sm->last_victim[GC_GREEDY] = end_segno + 1;
2950 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2951 ret = f2fs_gc(sbi, true, true, true, start_segno);
2959 mnt_drop_write_file(filp);
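/*
 * Illustrative userspace sketch (assumptions: a multi-device f2fs volume,
 * 'fd' open on that mount, CAP_SYS_ADMIN): migrate up to .segments
 * segments' worth of data off device 1.
 *
 *	struct f2fs_flush_device fdr = {
 *		.dev_num = 1,
 *		.segments = 512,
 *	};
 *	if (ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fdr) < 0)
 *		perror("F2FS_IOC_FLUSH_DEVICE");
 */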
2963 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2965 struct inode *inode = file_inode(filp);
2966 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2968 /* Always advertise atomic write support, to keep SQLite behavior in Android. */
2969 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2971 return put_user(sb_feature, (u32 __user *)arg);
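/*
 * Illustrative userspace sketch ('fd' is an assumption): the feature word
 * is returned through the u32 pointed to by arg. Note the F2FS_FEATURE_*
 * bit definitions live in the kernel's fs/f2fs/f2fs.h, not in the uapi
 * header, so userspace normally just logs the raw mask.
 *
 *	__u32 feat = 0;
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feat) == 0)
 *		printf("feature mask: 0x%x\n", feat);
 */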
2975 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2977 struct dquot *transfer_to[MAXQUOTAS] = {};
2978 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2979 struct super_block *sb = sbi->sb;
2982 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2983 if (IS_ERR(transfer_to[PRJQUOTA]))
2984 return PTR_ERR(transfer_to[PRJQUOTA]);
2986 err = __dquot_transfer(inode, transfer_to);
2988 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2989 dqput(transfer_to[PRJQUOTA]);
2993 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2995 struct f2fs_inode_info *fi = F2FS_I(inode);
2996 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3001 if (!f2fs_sb_has_project_quota(sbi)) {
3002 if (projid != F2FS_DEF_PROJID)
3008 if (!f2fs_has_extra_attr(inode))
3011 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3013 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3017 /* Is it quota file? Do not allow user to mess with it */
3018 if (IS_NOQUOTA(inode))
3021 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3023 return PTR_ERR(ipage);
3025 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3028 f2fs_put_page(ipage, 1);
3031 f2fs_put_page(ipage, 1);
3033 err = f2fs_dquot_initialize(inode);
3038 err = f2fs_transfer_project_quota(inode, kprojid);
3042 F2FS_I(inode)->i_projid = kprojid;
3043 inode->i_ctime = current_time(inode);
3044 f2fs_mark_inode_dirty_sync(inode, true);
3046 f2fs_unlock_op(sbi);
3050 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3055 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3057 if (projid != F2FS_DEF_PROJID)
3063 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3065 struct inode *inode = d_inode(dentry);
3066 struct f2fs_inode_info *fi = F2FS_I(inode);
3067 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3069 if (IS_ENCRYPTED(inode))
3070 fsflags |= FS_ENCRYPT_FL;
3071 if (IS_VERITY(inode))
3072 fsflags |= FS_VERITY_FL;
3073 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3074 fsflags |= FS_INLINE_DATA_FL;
3075 if (is_inode_flag_set(inode, FI_PIN_FILE))
3076 fsflags |= FS_NOCOW_FL;
3078 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3080 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3081 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3086 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3087 struct dentry *dentry, struct fileattr *fa)
3089 struct inode *inode = d_inode(dentry);
3090 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3094 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3096 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3098 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3100 fsflags &= F2FS_SETTABLE_FS_FL;
3101 if (!fa->flags_valid)
3102 mask &= FS_COMMON_FL;
3104 iflags = f2fs_fsflags_to_iflags(fsflags);
3105 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3108 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3110 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3115 int f2fs_pin_file_control(struct inode *inode, bool inc)
3117 struct f2fs_inode_info *fi = F2FS_I(inode);
3118 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3120 /* Use i_gc_failures for normal file as a risk signal. */
3122 f2fs_i_gc_failures_write(inode,
3123 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3125 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3126 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3127 __func__, inode->i_ino,
3128 fi->i_gc_failures[GC_FAILURE_PIN]);
3129 clear_inode_flag(inode, FI_PIN_FILE);
3135 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3137 struct inode *inode = file_inode(filp);
3141 if (get_user(pin, (__u32 __user *)arg))
3144 if (!S_ISREG(inode->i_mode))
3147 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3150 ret = mnt_want_write_file(filp);
3157 clear_inode_flag(inode, FI_PIN_FILE);
3158 f2fs_i_gc_failures_write(inode, 0);
3162 if (f2fs_should_update_outplace(inode, NULL)) {
3167 if (f2fs_pin_file_control(inode, false)) {
3172 ret = f2fs_convert_inline_inode(inode);
3176 if (!f2fs_disable_compressed_file(inode)) {
3181 set_inode_flag(inode, FI_PIN_FILE);
3182 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3184 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3186 inode_unlock(inode);
3187 mnt_drop_write_file(filp);
3191 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3193 struct inode *inode = file_inode(filp);
3196 if (is_inode_flag_set(inode, FI_PIN_FILE))
3197 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3198 return put_user(pin, (u32 __user *)arg);
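/*
 * Illustrative userspace sketch ('fd' is an assumption): pin a regular
 * file against GC migration, then read back its pin-related GC failure
 * count through the same u32 ABI.
 *
 *	__u32 pin = 1, failures = 0;
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 *	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures) == 0)
 *		printf("pin gc failures: %u\n", failures);
 */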
3201 int f2fs_precache_extents(struct inode *inode)
3203 struct f2fs_inode_info *fi = F2FS_I(inode);
3204 struct f2fs_map_blocks map;
3205 pgoff_t m_next_extent;
3209 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3213 map.m_next_pgofs = NULL;
3214 map.m_next_extent = &m_next_extent;
3215 map.m_seg_type = NO_CHECK_TYPE;
3216 map.m_may_create = false;
3217 end = max_file_blocks(inode);
3219 while (map.m_lblk < end) {
3220 map.m_len = end - map.m_lblk;
3222 down_write(&fi->i_gc_rwsem[WRITE]);
3223 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3224 up_write(&fi->i_gc_rwsem[WRITE]);
3228 map.m_lblk = m_next_extent;
3234 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3236 return f2fs_precache_extents(file_inode(filp));
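/*
 * Illustrative userspace sketch ('fd' is an assumption):
 * F2FS_IOC_PRECACHE_EXTENTS carries no payload; it walks the file's
 * mapping and warms the extent cache.
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS) < 0)
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */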
3239 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3241 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3244 if (!capable(CAP_SYS_ADMIN))
3247 if (f2fs_readonly(sbi->sb))
3250 if (copy_from_user(&block_count, (void __user *)arg,
3251 sizeof(block_count)))
3254 return f2fs_resize_fs(filp, block_count);
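/*
 * Illustrative userspace sketch (assumptions: 'fd' on the target mount,
 * CAP_SYS_ADMIN, and a block count that fits the backing device): the
 * argument is the desired total block count, not a delta.
 *
 *	__u64 block_count = 26214400;	(100 GiB of 4 KiB blocks)
 *	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count) < 0)
 *		perror("F2FS_IOC_RESIZE_FS");
 */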
3257 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3259 struct inode *inode = file_inode(filp);
3261 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3263 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3264 f2fs_warn(F2FS_I_SB(inode),
3265 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3270 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3273 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3275 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3278 return fsverity_ioctl_measure(filp, (void __user *)arg);
3281 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3283 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3286 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3289 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3291 struct inode *inode = file_inode(filp);
3292 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3297 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3301 down_read(&sbi->sb_lock);
3302 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3303 ARRAY_SIZE(sbi->raw_super->volume_name),
3304 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3305 up_read(&sbi->sb_lock);
3307 if (copy_to_user((char __user *)arg, vbuf,
3308 min(FSLABEL_MAX, count)))
3315 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3317 struct inode *inode = file_inode(filp);
3318 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3322 if (!capable(CAP_SYS_ADMIN))
3325 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3327 return PTR_ERR(vbuf);
3329 err = mnt_want_write_file(filp);
3333 down_write(&sbi->sb_lock);
3335 memset(sbi->raw_super->volume_name, 0,
3336 sizeof(sbi->raw_super->volume_name));
3337 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3338 sbi->raw_super->volume_name,
3339 ARRAY_SIZE(sbi->raw_super->volume_name));
3341 err = f2fs_commit_super(sbi, false);
3343 up_write(&sbi->sb_lock);
3345 mnt_drop_write_file(filp);
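/*
 * Illustrative userspace sketch ('fd' is an assumption): FS_IOC_GETFSLABEL
 * and FS_IOC_SETFSLABEL are the generic label ioctls from <linux/fs.h>;
 * f2fs keeps the label as UTF-16 in the superblock and converts in the
 * two handlers above.
 *
 *	char label[FSLABEL_MAX] = "";
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
 *		printf("label: %s\n", label);
 *	strncpy(label, "backup0", sizeof(label) - 1);
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, label) < 0)
 *		perror("FS_IOC_SETFSLABEL");
 */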
3351 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3353 struct inode *inode = file_inode(filp);
3356 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3359 if (!f2fs_compressed_file(inode))
3362 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3363 return put_user(blocks, (u64 __user *)arg);
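/*
 * Illustrative userspace sketch ('fd' is an assumption; the file must be
 * a compressed f2fs file): reads the number of blocks currently saved by
 * compression.
 *
 *	__u64 compr_blocks = 0;
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &compr_blocks) == 0)
 *		printf("saved blocks: %llu\n",
 *		       (unsigned long long)compr_blocks);
 */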
3366 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3368 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3369 unsigned int released_blocks = 0;
3370 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3374 for (i = 0; i < count; i++) {
3375 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3376 dn->ofs_in_node + i);
3378 if (!__is_valid_data_blkaddr(blkaddr))
3380 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3381 DATA_GENERIC_ENHANCE)))
3382 return -EFSCORRUPTED;
3386 int compr_blocks = 0;
3388 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3389 blkaddr = f2fs_data_blkaddr(dn);
3392 if (blkaddr == COMPRESS_ADDR)
3394 dn->ofs_in_node += cluster_size;
3398 if (__is_valid_data_blkaddr(blkaddr))
3401 if (blkaddr != NEW_ADDR)
3404 dn->data_blkaddr = NULL_ADDR;
3405 f2fs_set_data_blkaddr(dn);
3408 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3409 dec_valid_block_count(sbi, dn->inode,
3410 cluster_size - compr_blocks);
3412 released_blocks += cluster_size - compr_blocks;
3414 count -= cluster_size;
3417 return released_blocks;
3420 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3422 struct inode *inode = file_inode(filp);
3423 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3424 pgoff_t page_idx = 0, last_idx;
3425 unsigned int released_blocks = 0;
3429 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3432 if (!f2fs_compressed_file(inode))
3435 if (f2fs_readonly(sbi->sb))
3438 ret = mnt_want_write_file(filp);
3442 f2fs_balance_fs(F2FS_I_SB(inode), true);
3446 writecount = atomic_read(&inode->i_writecount);
3447 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3448 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3453 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3458 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3462 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3463 inode->i_ctime = current_time(inode);
3464 f2fs_mark_inode_dirty_sync(inode, true);
3466 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3469 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3470 filemap_invalidate_lock(inode->i_mapping);
3472 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3474 while (page_idx < last_idx) {
3475 struct dnode_of_data dn;
3476 pgoff_t end_offset, count;
3478 set_new_dnode(&dn, inode, NULL, NULL, 0);
3479 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3481 if (ret == -ENOENT) {
3482 page_idx = f2fs_get_next_page_offset(&dn,
3490 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3491 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3492 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3494 ret = release_compress_blocks(&dn, count);
3496 f2fs_put_dnode(&dn);
3502 released_blocks += ret;
3505 filemap_invalidate_unlock(inode->i_mapping);
3506 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3508 inode_unlock(inode);
3510 mnt_drop_write_file(filp);
3513 ret = put_user(released_blocks, (u64 __user *)arg);
3514 } else if (released_blocks &&
3515 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3516 set_sbi_flag(sbi, SBI_NEED_FSCK);
3517 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3518 "iblocks=%llu, released=%u, compr_blocks=%u, "
3520 __func__, inode->i_ino, inode->i_blocks,
3522 atomic_read(&F2FS_I(inode)->i_compr_blocks));
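/*
 * Illustrative userspace sketch ('fd' is an assumption): releasing the
 * saved blocks of a compressed file sets FI_COMPRESS_RELEASED, so the
 * file cannot be written until the blocks are reserved again; the
 * released count comes back through the u64 argument.
 *
 *	__u64 released = 0;
 *	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released) == 0)
 *		printf("released %llu blocks\n",
 *		       (unsigned long long)released);
 */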
3528 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3530 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3531 unsigned int reserved_blocks = 0;
3532 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3536 for (i = 0; i < count; i++) {
3537 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3538 dn->ofs_in_node + i);
3540 if (!__is_valid_data_blkaddr(blkaddr))
3542 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3543 DATA_GENERIC_ENHANCE)))
3544 return -EFSCORRUPTED;
3548 int compr_blocks = 0;
3552 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3553 blkaddr = f2fs_data_blkaddr(dn);
3556 if (blkaddr == COMPRESS_ADDR)
3558 dn->ofs_in_node += cluster_size;
3562 if (__is_valid_data_blkaddr(blkaddr)) {
3567 dn->data_blkaddr = NEW_ADDR;
3568 f2fs_set_data_blkaddr(dn);
3571 reserved = cluster_size - compr_blocks;
3572 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3576 if (reserved != cluster_size - compr_blocks)
3579 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3581 reserved_blocks += reserved;
3583 count -= cluster_size;
3586 return reserved_blocks;
3589 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3591 struct inode *inode = file_inode(filp);
3592 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3593 pgoff_t page_idx = 0, last_idx;
3594 unsigned int reserved_blocks = 0;
3597 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3600 if (!f2fs_compressed_file(inode))
3603 if (f2fs_readonly(sbi->sb))
3606 ret = mnt_want_write_file(filp);
3610 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3613 f2fs_balance_fs(F2FS_I_SB(inode), true);
3617 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3622 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3623 filemap_invalidate_lock(inode->i_mapping);
3625 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3627 while (page_idx < last_idx) {
3628 struct dnode_of_data dn;
3629 pgoff_t end_offset, count;
3631 set_new_dnode(&dn, inode, NULL, NULL, 0);
3632 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3634 if (ret == -ENOENT) {
3635 page_idx = f2fs_get_next_page_offset(&dn,
3643 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3644 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3645 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3647 ret = reserve_compress_blocks(&dn, count);
3649 f2fs_put_dnode(&dn);
3655 reserved_blocks += ret;
3658 filemap_invalidate_unlock(inode->i_mapping);
3659 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3662 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3663 inode->i_ctime = current_time(inode);
3664 f2fs_mark_inode_dirty_sync(inode, true);
3667 inode_unlock(inode);
3669 mnt_drop_write_file(filp);
3672 ret = put_user(reserved_blocks, (u64 __user *)arg);
3673 } else if (reserved_blocks &&
3674 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3675 set_sbi_flag(sbi, SBI_NEED_FSCK);
3676 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3677 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3679 __func__, inode->i_ino, inode->i_blocks,
3681 atomic_read(&F2FS_I(inode)->i_compr_blocks));
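/*
 * Illustrative userspace sketch ('fd' is an assumption): the inverse of
 * the release ioctl above; it re-reserves space for the compressed file
 * and clears FI_COMPRESS_RELEASED so the file becomes writable again.
 *
 *	__u64 reserved = 0;
 *	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &reserved) == 0)
 *		printf("reserved %llu blocks\n",
 *		       (unsigned long long)reserved);
 */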
3687 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3688 pgoff_t off, block_t block, block_t len, u32 flags)
3690 struct request_queue *q = bdev_get_queue(bdev);
3691 sector_t sector = SECTOR_FROM_BLOCK(block);
3692 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3698 if (flags & F2FS_TRIM_FILE_DISCARD)
3699 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3700 blk_queue_secure_erase(q) ?
3701 BLKDEV_DISCARD_SECURE : 0);
3703 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3704 if (IS_ENCRYPTED(inode))
3705 ret = fscrypt_zeroout_range(inode, off, block, len);
3707 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3714 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3716 struct inode *inode = file_inode(filp);
3717 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3718 struct address_space *mapping = inode->i_mapping;
3719 struct block_device *prev_bdev = NULL;
3720 struct f2fs_sectrim_range range;
3721 pgoff_t index, pg_end, prev_index = 0;
3722 block_t prev_block = 0, len = 0;
3724 bool to_end = false;
3727 if (!(filp->f_mode & FMODE_WRITE))
3730 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3734 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3735 !S_ISREG(inode->i_mode))
3738 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3739 !f2fs_hw_support_discard(sbi)) ||
3740 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3741 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3744 file_start_write(filp);
3747 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3748 range.start >= inode->i_size) {
3756 if (inode->i_size - range.start > range.len) {
3757 end_addr = range.start + range.len;
3759 end_addr = range.len == (u64)-1 ?
3760 sbi->sb->s_maxbytes : inode->i_size;
3764 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3765 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3770 index = F2FS_BYTES_TO_BLK(range.start);
3771 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3773 ret = f2fs_convert_inline_inode(inode);
3777 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3778 filemap_invalidate_lock(mapping);
3780 ret = filemap_write_and_wait_range(mapping, range.start,
3781 to_end ? LLONG_MAX : end_addr - 1);
3785 truncate_inode_pages_range(mapping, range.start,
3786 to_end ? -1 : end_addr - 1);
3788 while (index < pg_end) {
3789 struct dnode_of_data dn;
3790 pgoff_t end_offset, count;
3793 set_new_dnode(&dn, inode, NULL, NULL, 0);
3794 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3796 if (ret == -ENOENT) {
3797 index = f2fs_get_next_page_offset(&dn, index);
3803 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3804 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3805 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3806 struct block_device *cur_bdev;
3807 block_t blkaddr = f2fs_data_blkaddr(&dn);
3809 if (!__is_valid_data_blkaddr(blkaddr))
3812 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3813 DATA_GENERIC_ENHANCE)) {
3814 ret = -EFSCORRUPTED;
3815 f2fs_put_dnode(&dn);
3819 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3820 if (f2fs_is_multi_device(sbi)) {
3821 int di = f2fs_target_device_index(sbi, blkaddr);
3823 blkaddr -= FDEV(di).start_blk;
3827 if (prev_bdev == cur_bdev &&
3828 index == prev_index + len &&
3829 blkaddr == prev_block + len) {
3832 ret = f2fs_secure_erase(prev_bdev,
3833 inode, prev_index, prev_block,
3836 f2fs_put_dnode(&dn);
3845 prev_bdev = cur_bdev;
3847 prev_block = blkaddr;
3852 f2fs_put_dnode(&dn);
3854 if (fatal_signal_pending(current)) {
3862 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3863 prev_block, len, range.flags);
3865 filemap_invalidate_unlock(mapping);
3866 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3868 inode_unlock(inode);
3869 file_end_write(filp);
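/*
 * Illustrative userspace sketch ('fd' and the 1 MiB range are
 * assumptions; the fd must be open for writing and the range
 * block-aligned): discard and then zero out the file's on-disk blocks.
 * A len of (u64)-1 means "through end of file".
 *
 *	struct f2fs_sectrim_range tr = {
 *		.start = 0,
 *		.len = 1ULL << 20,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &tr) < 0)
 *		perror("F2FS_IOC_SEC_TRIM_FILE");
 */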
3874 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3876 struct inode *inode = file_inode(filp);
3877 struct f2fs_comp_option option;
3879 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3882 inode_lock_shared(inode);
3884 if (!f2fs_compressed_file(inode)) {
3885 inode_unlock_shared(inode);
3889 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3890 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3892 inode_unlock_shared(inode);
3894 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3901 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3903 struct inode *inode = file_inode(filp);
3904 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3905 struct f2fs_comp_option option;
3908 if (!f2fs_sb_has_compression(sbi))
3911 if (!(filp->f_mode & FMODE_WRITE))
3914 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3918 if (!f2fs_compressed_file(inode) ||
3919 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3920 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3921 option.algorithm >= COMPRESS_MAX)
3924 file_start_write(filp);
3927 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3932 if (F2FS_HAS_BLOCKS(inode)) {
3937 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3938 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3939 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3940 f2fs_mark_inode_dirty_sync(inode, true);
3942 if (!f2fs_is_compress_backend_ready(inode))
3943 f2fs_warn(sbi, "compression algorithm is successfully set, "
3944 "but the current kernel doesn't support this algorithm.");
3946 inode_unlock(inode);
3947 file_end_write(filp);
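/*
 * Illustrative userspace sketch ('fd' is an assumption; as checked above,
 * the file must be compressed, hold no data blocks, and not be mmapped):
 * switch the per-file algorithm and use a 4-block (log 2) cluster.
 * Algorithm numbering follows the COMPRESS_* enum in the kernel's
 * f2fs.h (1 is lz4).
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm = 1,
 *		.log_cluster_size = 2,
 *	};
 *	if (ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt) < 0)
 *		perror("F2FS_IOC_SET_COMPRESS_OPTION");
 */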
3952 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3954 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3955 struct address_space *mapping = inode->i_mapping;
3957 pgoff_t redirty_idx = page_idx;
3958 int i, page_len = 0, ret = 0;
3960 page_cache_ra_unbounded(&ractl, len, 0);
3962 for (i = 0; i < len; i++, page_idx++) {
3963 page = read_cache_page(mapping, page_idx, NULL, NULL);
3965 ret = PTR_ERR(page);
3971 for (i = 0; i < page_len; i++, redirty_idx++) {
3972 page = find_lock_page(mapping, redirty_idx);
3977 set_page_dirty(page);
3978 f2fs_put_page(page, 1);
3979 f2fs_put_page(page, 0);
3985 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3987 struct inode *inode = file_inode(filp);
3988 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3989 struct f2fs_inode_info *fi = F2FS_I(inode);
3990 pgoff_t page_idx = 0, last_idx;
3991 unsigned int blk_per_seg = sbi->blocks_per_seg;
3992 int cluster_size = F2FS_I(inode)->i_cluster_size;
3995 if (!f2fs_sb_has_compression(sbi) ||
3996 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3999 if (!(filp->f_mode & FMODE_WRITE))
4002 if (!f2fs_compressed_file(inode))
4005 f2fs_balance_fs(F2FS_I_SB(inode), true);
4007 file_start_write(filp);
4010 if (!f2fs_is_compress_backend_ready(inode)) {
4015 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4020 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4024 if (!atomic_read(&fi->i_compr_blocks))
4027 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4029 count = last_idx - page_idx;
4031 int len = min(cluster_size, count);
4033 ret = redirty_blocks(inode, page_idx, len);
4037 if (get_dirty_pages(inode) >= blk_per_seg)
4038 filemap_fdatawrite(inode->i_mapping);
4045 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4049 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4052 inode_unlock(inode);
4053 file_end_write(filp);
4058 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4060 struct inode *inode = file_inode(filp);
4061 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4062 pgoff_t page_idx = 0, last_idx;
4063 unsigned int blk_per_seg = sbi->blocks_per_seg;
4064 int cluster_size = F2FS_I(inode)->i_cluster_size;
4067 if (!f2fs_sb_has_compression(sbi) ||
4068 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4071 if (!(filp->f_mode & FMODE_WRITE))
4074 if (!f2fs_compressed_file(inode))
4077 f2fs_balance_fs(F2FS_I_SB(inode), true);
4079 file_start_write(filp);
4082 if (!f2fs_is_compress_backend_ready(inode)) {
4087 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4092 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4096 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4098 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4100 count = last_idx - page_idx;
4102 int len = min(cluster_size, count);
4104 ret = redirty_blocks(inode, page_idx, len);
4108 if (get_dirty_pages(inode) >= blk_per_seg)
4109 filemap_fdatawrite(inode->i_mapping);
4116 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4119 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4122 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4125 inode_unlock(inode);
4126 file_end_write(filp);
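/*
 * Illustrative userspace sketch ('fd' is an assumption; requires the
 * filesystem to be mounted with compress_mode=user): both ioctls carry no
 * payload and simply redirty the file's clusters so writeback
 * (de)compresses them.
 *
 *	if (ioctl(fd, F2FS_IOC_COMPRESS_FILE) < 0)
 *		perror("F2FS_IOC_COMPRESS_FILE");
 *	(later, to undo:)
 *	if (ioctl(fd, F2FS_IOC_DECOMPRESS_FILE) < 0)
 *		perror("F2FS_IOC_DECOMPRESS_FILE");
 */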
4131 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4134 case FS_IOC_GETVERSION:
4135 return f2fs_ioc_getversion(filp, arg);
4136 case F2FS_IOC_START_ATOMIC_WRITE:
4137 return f2fs_ioc_start_atomic_write(filp);
4138 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4139 return f2fs_ioc_commit_atomic_write(filp);
4140 case F2FS_IOC_START_VOLATILE_WRITE:
4141 return f2fs_ioc_start_volatile_write(filp);
4142 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4143 return f2fs_ioc_release_volatile_write(filp);
4144 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4145 return f2fs_ioc_abort_volatile_write(filp);
4146 case F2FS_IOC_SHUTDOWN:
4147 return f2fs_ioc_shutdown(filp, arg);
4149 return f2fs_ioc_fitrim(filp, arg);
4150 case FS_IOC_SET_ENCRYPTION_POLICY:
4151 return f2fs_ioc_set_encryption_policy(filp, arg);
4152 case FS_IOC_GET_ENCRYPTION_POLICY:
4153 return f2fs_ioc_get_encryption_policy(filp, arg);
4154 case FS_IOC_GET_ENCRYPTION_PWSALT:
4155 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4156 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4157 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4158 case FS_IOC_ADD_ENCRYPTION_KEY:
4159 return f2fs_ioc_add_encryption_key(filp, arg);
4160 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4161 return f2fs_ioc_remove_encryption_key(filp, arg);
4162 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4163 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4164 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4165 return f2fs_ioc_get_encryption_key_status(filp, arg);
4166 case FS_IOC_GET_ENCRYPTION_NONCE:
4167 return f2fs_ioc_get_encryption_nonce(filp, arg);
4168 case F2FS_IOC_GARBAGE_COLLECT:
4169 return f2fs_ioc_gc(filp, arg);
4170 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4171 return f2fs_ioc_gc_range(filp, arg);
4172 case F2FS_IOC_WRITE_CHECKPOINT:
4173 return f2fs_ioc_write_checkpoint(filp, arg);
4174 case F2FS_IOC_DEFRAGMENT:
4175 return f2fs_ioc_defragment(filp, arg);
4176 case F2FS_IOC_MOVE_RANGE:
4177 return f2fs_ioc_move_range(filp, arg);
4178 case F2FS_IOC_FLUSH_DEVICE:
4179 return f2fs_ioc_flush_device(filp, arg);
4180 case F2FS_IOC_GET_FEATURES:
4181 return f2fs_ioc_get_features(filp, arg);
4182 case F2FS_IOC_GET_PIN_FILE:
4183 return f2fs_ioc_get_pin_file(filp, arg);
4184 case F2FS_IOC_SET_PIN_FILE:
4185 return f2fs_ioc_set_pin_file(filp, arg);
4186 case F2FS_IOC_PRECACHE_EXTENTS:
4187 return f2fs_ioc_precache_extents(filp, arg);
4188 case F2FS_IOC_RESIZE_FS:
4189 return f2fs_ioc_resize_fs(filp, arg);
4190 case FS_IOC_ENABLE_VERITY:
4191 return f2fs_ioc_enable_verity(filp, arg);
4192 case FS_IOC_MEASURE_VERITY:
4193 return f2fs_ioc_measure_verity(filp, arg);
4194 case FS_IOC_READ_VERITY_METADATA:
4195 return f2fs_ioc_read_verity_metadata(filp, arg);
4196 case FS_IOC_GETFSLABEL:
4197 return f2fs_ioc_getfslabel(filp, arg);
4198 case FS_IOC_SETFSLABEL:
4199 return f2fs_ioc_setfslabel(filp, arg);
4200 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4201 return f2fs_get_compress_blocks(filp, arg);
4202 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4203 return f2fs_release_compress_blocks(filp, arg);
4204 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4205 return f2fs_reserve_compress_blocks(filp, arg);
4206 case F2FS_IOC_SEC_TRIM_FILE:
4207 return f2fs_sec_trim_file(filp, arg);
4208 case F2FS_IOC_GET_COMPRESS_OPTION:
4209 return f2fs_ioc_get_compress_option(filp, arg);
4210 case F2FS_IOC_SET_COMPRESS_OPTION:
4211 return f2fs_ioc_set_compress_option(filp, arg);
4212 case F2FS_IOC_DECOMPRESS_FILE:
4213 return f2fs_ioc_decompress_file(filp, arg);
4214 case F2FS_IOC_COMPRESS_FILE:
4215 return f2fs_ioc_compress_file(filp, arg);
4221 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4223 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4225 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4228 return __f2fs_ioctl(filp, cmd, arg);
4231 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4233 struct file *file = iocb->ki_filp;
4234 struct inode *inode = file_inode(file);
4237 if (!f2fs_is_compress_backend_ready(inode))
4240 ret = generic_file_read_iter(iocb, iter);
4243 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4248 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4250 struct file *file = iocb->ki_filp;
4251 struct inode *inode = file_inode(file);
4254 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4259 if (!f2fs_is_compress_backend_ready(inode)) {
4264 if (iocb->ki_flags & IOCB_NOWAIT) {
4265 if (!inode_trylock(inode)) {
4273 if (unlikely(IS_IMMUTABLE(inode))) {
4278 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4283 ret = generic_write_checks(iocb, from);
4285 bool preallocated = false;
4286 size_t target_size = 0;
4289 if (fault_in_iov_iter_readable(from, iov_iter_count(from)))
4290 set_inode_flag(inode, FI_NO_PREALLOC);
4292 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4293 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4294 iov_iter_count(from)) ||
4295 f2fs_has_inline_data(inode) ||
4296 f2fs_force_buffered_io(inode, iocb, from)) {
4297 clear_inode_flag(inode, FI_NO_PREALLOC);
4298 inode_unlock(inode);
4305 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4308 if (iocb->ki_flags & IOCB_DIRECT) {
4310 * Convert inline data for Direct I/O before entering f2fs_direct_IO().
4313 err = f2fs_convert_inline_inode(inode);
4317 * If f2fs_force_buffered_io() is true, we have to allocate
4318 * blocks all the time, since f2fs_direct_IO will fall
4319 * back to buffered IO.
4321 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4322 f2fs_lfs_mode(F2FS_I_SB(inode)))
4325 preallocated = true;
4326 target_size = iocb->ki_pos + iov_iter_count(from);
4328 err = f2fs_preallocate_blocks(iocb, from);
4331 clear_inode_flag(inode, FI_NO_PREALLOC);
4332 inode_unlock(inode);
4337 ret = __generic_file_write_iter(iocb, from);
4338 clear_inode_flag(inode, FI_NO_PREALLOC);
4340 /* if we couldn't write data, we should deallocate blocks. */
4341 if (preallocated && i_size_read(inode) < target_size) {
4342 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4343 filemap_invalidate_lock(inode->i_mapping);
4344 f2fs_truncate(inode);
4345 filemap_invalidate_unlock(inode->i_mapping);
4346 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4350 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4353 inode_unlock(inode);
4355 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4356 iov_iter_count(from), ret);
4358 ret = generic_write_sync(iocb, ret);
4362 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4365 struct inode *inode;
4366 struct address_space *mapping;
4367 struct backing_dev_info *bdi;
4369 if (advice == POSIX_FADV_SEQUENTIAL) {
4370 inode = file_inode(filp);
4371 if (S_ISFIFO(inode->i_mode))
4374 mapping = filp->f_mapping;
4375 if (!mapping || len < 0)
4378 bdi = inode_to_bdi(mapping->host);
4379 filp->f_ra.ra_pages = bdi->ra_pages *
4380 F2FS_I_SB(inode)->seq_file_ra_mul;
4381 spin_lock(&filp->f_lock);
4382 filp->f_mode &= ~FMODE_RANDOM;
4383 spin_unlock(&filp->f_lock);
4387 return generic_fadvise(filp, offset, len, advice);
4390 #ifdef CONFIG_COMPAT
4391 struct compat_f2fs_gc_range {
4396 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4397 struct compat_f2fs_gc_range)
4399 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4401 struct compat_f2fs_gc_range __user *urange;
4402 struct f2fs_gc_range range;
4405 urange = compat_ptr(arg);
4406 err = get_user(range.sync, &urange->sync);
4407 err |= get_user(range.start, &urange->start);
4408 err |= get_user(range.len, &urange->len);
4412 return __f2fs_ioc_gc_range(file, &range);
4415 struct compat_f2fs_move_range {
4421 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4422 struct compat_f2fs_move_range)
4424 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4426 struct compat_f2fs_move_range __user *urange;
4427 struct f2fs_move_range range;
4430 urange = compat_ptr(arg);
4431 err = get_user(range.dst_fd, &urange->dst_fd);
4432 err |= get_user(range.pos_in, &urange->pos_in);
4433 err |= get_user(range.pos_out, &urange->pos_out);
4434 err |= get_user(range.len, &urange->len);
4438 return __f2fs_ioc_move_range(file, &range);
4441 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4443 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4445 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4449 case FS_IOC32_GETVERSION:
4450 cmd = FS_IOC_GETVERSION;
4452 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4453 return f2fs_compat_ioc_gc_range(file, arg);
4454 case F2FS_IOC32_MOVE_RANGE:
4455 return f2fs_compat_ioc_move_range(file, arg);
4456 case F2FS_IOC_START_ATOMIC_WRITE:
4457 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4458 case F2FS_IOC_START_VOLATILE_WRITE:
4459 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4460 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4461 case F2FS_IOC_SHUTDOWN:
4463 case FS_IOC_SET_ENCRYPTION_POLICY:
4464 case FS_IOC_GET_ENCRYPTION_PWSALT:
4465 case FS_IOC_GET_ENCRYPTION_POLICY:
4466 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4467 case FS_IOC_ADD_ENCRYPTION_KEY:
4468 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4469 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4470 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4471 case FS_IOC_GET_ENCRYPTION_NONCE:
4472 case F2FS_IOC_GARBAGE_COLLECT:
4473 case F2FS_IOC_WRITE_CHECKPOINT:
4474 case F2FS_IOC_DEFRAGMENT:
4475 case F2FS_IOC_FLUSH_DEVICE:
4476 case F2FS_IOC_GET_FEATURES:
4477 case F2FS_IOC_GET_PIN_FILE:
4478 case F2FS_IOC_SET_PIN_FILE:
4479 case F2FS_IOC_PRECACHE_EXTENTS:
4480 case F2FS_IOC_RESIZE_FS:
4481 case FS_IOC_ENABLE_VERITY:
4482 case FS_IOC_MEASURE_VERITY:
4483 case FS_IOC_READ_VERITY_METADATA:
4484 case FS_IOC_GETFSLABEL:
4485 case FS_IOC_SETFSLABEL:
4486 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4487 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4488 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4489 case F2FS_IOC_SEC_TRIM_FILE:
4490 case F2FS_IOC_GET_COMPRESS_OPTION:
4491 case F2FS_IOC_SET_COMPRESS_OPTION:
4492 case F2FS_IOC_DECOMPRESS_FILE:
4493 case F2FS_IOC_COMPRESS_FILE:
4496 return -ENOIOCTLCMD;
4498 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4502 const struct file_operations f2fs_file_operations = {
4503 .llseek = f2fs_llseek,
4504 .read_iter = f2fs_file_read_iter,
4505 .write_iter = f2fs_file_write_iter,
4506 .open = f2fs_file_open,
4507 .release = f2fs_release_file,
4508 .mmap = f2fs_file_mmap,
4509 .flush = f2fs_file_flush,
4510 .fsync = f2fs_sync_file,
4511 .fallocate = f2fs_fallocate,
4512 .unlocked_ioctl = f2fs_ioctl,
4513 #ifdef CONFIG_COMPAT
4514 .compat_ioctl = f2fs_compat_ioctl,
4516 .splice_read = generic_file_splice_read,
4517 .splice_write = iter_file_splice_write,
4518 .fadvise = f2fs_file_fadvise,