// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

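/*
 * Memory-mapped I/O path.  f2fs serializes page faults against truncate
 * and hole punching with F2FS_I(inode)->i_mmap_sem: fault handlers take
 * it shared, while truncate-like operations take it exclusively before
 * shrinking i_size or dropping page cache.
 */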
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

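/*
 * Called when a shared, read-only mapped page is about to become
 * writable: allocate any missing block, zero the part of the page
 * beyond EOF, and mark the page dirty so writeback will pick it up.
 */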
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

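/*
 * Decide whether this fsync can be satisfied by roll-forward recovery
 * (writing only the inode's node chain) or must fall back to a full
 * checkpoint.  Any reason other than CP_NO_NEEDED forces the
 * checkpoint path in f2fs_do_sync_file().
 */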
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

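/*
 * Core of fsync()/fdatasync().  The fast path writes back dirty data
 * and the inode's node chain so roll-forward recovery can replay the
 * update after a crash; only when need_do_checkpoint() reports a
 * reason do we pay for a full checkpoint via f2fs_sync_fs().
 */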
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs.  The following
		 * pino fixup will be used only for fsynced inodes after
		 * checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering.  So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks.  If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting in
	 * stopping roll-forward recovery.  That means we'll recover all or
	 * none of the node blocks in the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

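/*
 * llseek(SEEK_DATA/SEEK_HOLE) support: walk direct-node blocks and
 * classify each block address.  A valid on-disk address is data; a
 * NEW_ADDR block counts as data only if its page is dirty in the page
 * cache (a reserved-but-unwritten block is otherwise a hole).
 */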
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

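/*
 * Free 'count' block addresses starting at dn->ofs_in_node in the
 * given dnode, updating the extent cache and the inode's valid-block
 * count.  For compressed files the walk is cluster-aware so that
 * i_compr_blocks stays consistent with the blocks actually released.
 */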
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts on a cluster boundary */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

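/*
 * Truncate all data blocks at or beyond byte offset 'from'.  The tail
 * of the boundary dnode is freed directly; deeper node blocks are torn
 * down by f2fs_truncate_inode_blocks(), and any partial last page is
 * zeroed so stale data is not exposed.
 */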
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) &&
			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

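/*
 * Block-exchange machinery shared by collapse range, insert range and
 * F2FS_IOC_MOVE_RANGE: __read_out_blkaddrs() records and detaches the
 * source block addresses, __clone_blkaddrs() wires them (or copies the
 * data) into the destination, and __roll_back_blkaddrs() restores the
 * source mapping if anything fails part-way.
 */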
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

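/*
 * FALLOC_FL_ZERO_RANGE helper: reserve blocks for the range and reset
 * every previously written block back to NEW_ADDR (a reserved,
 * unwritten block), so subsequent reads observe zeroes.
 */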
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr == NEW_ADDR)
			continue;

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			break;
		}

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		dn->data_blkaddr = NEW_ADDR;
		f2fs_set_data_blkaddr(dn);
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

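/*
 * Preallocation for fallocate() without FALLOC_FL_* range-changing
 * flags.  For pinned files the blocks must stay physically in place
 * (GC must not move them), so allocation proceeds one fresh cold-data
 * section at a time.
 */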
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	ret = file_modified(file);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close().  So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close their files.  Since this should be
	 * done before dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

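/*
 * Apply a new set of f2fs i_flags, validating transitions that need
 * filesystem support (casefold, compression) or that are only legal
 * on an empty file or directory.
 */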
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		} else {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

1928 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1933 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1934 if (iflags & f2fs_fsflags_map[i].iflag)
1935 fsflags |= f2fs_fsflags_map[i].fsflag;
1940 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1941 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1946 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1947 if (fsflags & f2fs_fsflags_map[i].fsflag)
1948 iflags |= f2fs_fsflags_map[i].iflag;
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

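/*
 * Atomic-write ioctls.  Userspace (e.g. SQLite on Android) brackets a
 * database update with F2FS_IOC_START_ATOMIC_WRITE and
 * F2FS_IOC_COMMIT_ATOMIC_WRITE; dirty pages are held in memory and only
 * reach disk, all-or-nothing, at commit time.  A minimal usage sketch
 * (illustrative only, error handling omitted):
 *
 *	int fd = open("db", O_RDWR);
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);                       // staged in memory
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);   // atomic commit
 */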
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EINVAL;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode to inmem_list first, then set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

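/*
 * F2FS_IOC_SHUTDOWN: stop the filesystem in one of several modes,
 * mirroring XFS_IOC_GOINGDOWN.  Usage sketch from userspace
 * (illustrative only):
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */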
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

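/*
 * Like F2FS_IOC_GARBAGE_COLLECT, but restricted to a caller-supplied
 * block range; the range is swept one section at a time.
 */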
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

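/*
 * Defragment a file range: detect whether the range's physical blocks
 * are discontiguous and, if so, dirty the pages so writeback
 * reallocates them contiguously.  Skipped entirely when the
 * in-place-update policy would rewrite blocks where they already sit.
 */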
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running the defragment in SSR mode when free sections are
	 * being allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

2733 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2735 struct inode *inode = file_inode(filp);
2736 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2737 struct f2fs_defragment range;
2740 if (!capable(CAP_SYS_ADMIN))
2743 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2746 if (f2fs_readonly(sbi->sb))
2749 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2753 /* verify alignment of offset & size */
2754 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2757 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2758 sbi->max_file_blocks))
2761 err = mnt_want_write_file(filp);
2765 err = f2fs_defragment_range(sbi, filp, &range);
2766 mnt_drop_write_file(filp);
2768 f2fs_update_time(sbi, REQ_TIME);
2772 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
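/*
 * Example (userspace sketch): defragmenting the first 64 MiB of a
 * regular file. start and len must be F2FS_BLKSIZE aligned; on success
 * the kernel rewrites len to the number of bytes it queued for moving.
 *
 *	struct f2fs_defragment df = {
 *		.start = 0,
 *		.len = 64ULL << 20,
 *	};
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df))
 *		perror("F2FS_IOC_DEFRAGMENT");
 *	else
 *		printf("moved %llu bytes\n", (unsigned long long)df.len);
 */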
2779 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2780 struct file *file_out, loff_t pos_out, size_t len)
2782 struct inode *src = file_inode(file_in);
2783 struct inode *dst = file_inode(file_out);
2784 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2785 size_t olen = len, dst_max_i_size = 0;
2789 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2790 src->i_sb != dst->i_sb)
2793 if (unlikely(f2fs_readonly(src->i_sb)))
2796 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2799 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2802 if (pos_out < 0 || pos_in < 0)
2806 if (pos_in == pos_out)
2808 if (pos_out > pos_in && pos_out < pos_in + len)
2815 if (!inode_trylock(dst))
2819 if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2825 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2828 olen = len = src->i_size - pos_in;
2829 if (pos_in + len == src->i_size)
2830 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2836 dst_osize = dst->i_size;
2837 if (pos_out + olen > dst->i_size)
2838 dst_max_i_size = pos_out + olen;
2840 /* verify the end result is block aligned */
2841 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2842 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2843 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2846 ret = f2fs_convert_inline_inode(src);
2850 ret = f2fs_convert_inline_inode(dst);
2854 /* write out all dirty pages from offset */
2855 ret = filemap_write_and_wait_range(src->i_mapping,
2856 pos_in, pos_in + len);
2860 ret = filemap_write_and_wait_range(dst->i_mapping,
2861 pos_out, pos_out + len);
2865 f2fs_balance_fs(sbi, true);
2867 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2870 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2875 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2876 pos_out >> F2FS_BLKSIZE_BITS,
2877 len >> F2FS_BLKSIZE_BITS, false);
2881 f2fs_i_size_write(dst, dst_max_i_size);
2882 else if (dst_osize != dst->i_size)
2883 f2fs_i_size_write(dst, dst_osize);
2885 f2fs_unlock_op(sbi);
2888 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2890 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2899 static int __f2fs_ioc_move_range(struct file *filp,
2900 struct f2fs_move_range *range)
2905 if (!(filp->f_mode & FMODE_READ) ||
2906 !(filp->f_mode & FMODE_WRITE))
2909 dst = fdget(range->dst_fd);
2913 if (!(dst.file->f_mode & FMODE_WRITE)) {
2918 err = mnt_want_write_file(filp);
2922 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2923 range->pos_out, range->len);
2925 mnt_drop_write_file(filp);
2931 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2933 struct f2fs_move_range range;
2935 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2938 return __f2fs_ioc_move_range(filp, &range);
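/*
 * Example (userspace sketch): moving one block of data between two
 * files with F2FS_IOC_MOVE_RANGE. The source fd must be open
 * read/write and the destination fd writable; all offsets and the
 * length must satisfy the block-alignment checks above.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 4096,
 *	};
 *	if (ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr))
 *		perror("F2FS_IOC_MOVE_RANGE");
 */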
2941 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2943 struct inode *inode = file_inode(filp);
2944 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2945 struct sit_info *sm = SIT_I(sbi);
2946 unsigned int start_segno = 0, end_segno = 0;
2947 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2948 struct f2fs_flush_device range;
2951 if (!capable(CAP_SYS_ADMIN))
2954 if (f2fs_readonly(sbi->sb))
2957 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2960 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2964 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2965 __is_large_section(sbi)) {
2966 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2967 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2971 ret = mnt_want_write_file(filp);
2975 if (range.dev_num != 0)
2976 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2977 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2979 start_segno = sm->last_victim[FLUSH_DEVICE];
2980 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2981 start_segno = dev_start_segno;
2982 end_segno = min(start_segno + range.segments, dev_end_segno);
2984 while (start_segno < end_segno) {
2985 if (!down_write_trylock(&sbi->gc_lock)) {
2989 sm->last_victim[GC_CB] = end_segno + 1;
2990 sm->last_victim[GC_GREEDY] = end_segno + 1;
2991 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2992 ret = f2fs_gc(sbi, true, true, true, start_segno);
3000 mnt_drop_write_file(filp);
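/*
 * Example (userspace sketch): migrating up to 512 segments off device 1
 * of a multi-device f2fs volume. As the check above enforces, this is
 * only supported when segs_per_sec == 1.
 *
 *	struct f2fs_flush_device fld = {
 *		.dev_num = 1,
 *		.segments = 512,
 *	};
 *	if (ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fld))
 *		perror("F2FS_IOC_FLUSH_DEVICE");
 */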
3004 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3006 struct inode *inode = file_inode(filp);
3007 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3009 /* Always report atomic write support, to match SQLite's expectations on Android. */
3010 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3012 return put_user(sb_feature, (u32 __user *)arg);
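/*
 * Example (userspace sketch): reading the feature word. The individual
 * F2FS_FEATURE_* bit definitions live in the kernel's private f2fs.h,
 * so a portable caller usually just logs the raw value.
 *
 *	__u32 features;
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &features) == 0)
 *		printf("f2fs features: 0x%x\n", features);
 */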
3016 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3018 struct dquot *transfer_to[MAXQUOTAS] = {};
3019 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3020 struct super_block *sb = sbi->sb;
3023 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3024 if (IS_ERR(transfer_to[PRJQUOTA]))
3025 return PTR_ERR(transfer_to[PRJQUOTA]);
3027 err = __dquot_transfer(inode, transfer_to);
3029 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3030 dqput(transfer_to[PRJQUOTA]);
3034 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3036 struct inode *inode = file_inode(filp);
3037 struct f2fs_inode_info *fi = F2FS_I(inode);
3038 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3043 if (!f2fs_sb_has_project_quota(sbi)) {
3044 if (projid != F2FS_DEF_PROJID)
3050 if (!f2fs_has_extra_attr(inode))
3053 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3055 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3059 /* Is it quota file? Do not allow user to mess with it */
3060 if (IS_NOQUOTA(inode))
3063 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3065 return PTR_ERR(ipage);
3067 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3070 f2fs_put_page(ipage, 1);
3073 f2fs_put_page(ipage, 1);
3075 err = dquot_initialize(inode);
3080 err = f2fs_transfer_project_quota(inode, kprojid);
3084 F2FS_I(inode)->i_projid = kprojid;
3085 inode->i_ctime = current_time(inode);
3086 f2fs_mark_inode_dirty_sync(inode, true);
3088 f2fs_unlock_op(sbi);
3092 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3097 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3099 if (projid != F2FS_DEF_PROJID)
3105 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3108 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3109 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3110 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3113 static const struct {
3116 } f2fs_xflags_map[] = {
3117 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3118 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3119 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3120 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3121 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3122 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3125 #define F2FS_SUPPORTED_XFLAGS ( \
3127 FS_XFLAG_IMMUTABLE | \
3130 FS_XFLAG_NOATIME | \
3131 FS_XFLAG_PROJINHERIT)
3133 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3134 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3139 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3140 if (iflags & f2fs_xflags_map[i].iflag)
3141 xflags |= f2fs_xflags_map[i].xflag;
3146 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3147 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3152 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3153 if (xflags & f2fs_xflags_map[i].xflag)
3154 iflags |= f2fs_xflags_map[i].iflag;
3159 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3161 struct f2fs_inode_info *fi = F2FS_I(inode);
3163 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3165 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3166 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3169 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3171 struct inode *inode = file_inode(filp);
3174 f2fs_fill_fsxattr(inode, &fa);
3176 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3181 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3183 struct inode *inode = file_inode(filp);
3184 struct fsxattr fa, old_fa;
3188 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3191 /* Make sure caller has proper permission */
3192 if (!inode_owner_or_capable(inode))
3195 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3198 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3199 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3202 err = mnt_want_write_file(filp);
3208 f2fs_fill_fsxattr(inode, &old_fa);
3209 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3213 err = f2fs_setflags_common(inode, iflags,
3214 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3218 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3220 inode_unlock(inode);
3221 mnt_drop_write_file(filp);
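/*
 * Example (userspace sketch): assigning project ID 42 and setting
 * PROJINHERIT through the generic struct fsxattr interface from
 * <linux/fs.h>. A read-modify-write keeps the remaining xflags intact.
 *
 *	struct fsxattr fa;
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) == 0) {
 *		fa.fsx_projid = 42;
 *		fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *		if (ioctl(fd, FS_IOC_FSSETXATTR, &fa))
 *			perror("FS_IOC_FSSETXATTR");
 *	}
 */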
3225 int f2fs_pin_file_control(struct inode *inode, bool inc)
3227 struct f2fs_inode_info *fi = F2FS_I(inode);
3228 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3230 /* Use i_gc_failures for normal file as a risk signal. */
3232 f2fs_i_gc_failures_write(inode,
3233 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3235 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3236 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3237 __func__, inode->i_ino,
3238 fi->i_gc_failures[GC_FAILURE_PIN]);
3239 clear_inode_flag(inode, FI_PIN_FILE);
3245 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3247 struct inode *inode = file_inode(filp);
3251 if (get_user(pin, (__u32 __user *)arg))
3254 if (!S_ISREG(inode->i_mode))
3257 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3260 ret = mnt_want_write_file(filp);
3266 if (f2fs_should_update_outplace(inode, NULL)) {
3272 clear_inode_flag(inode, FI_PIN_FILE);
3273 f2fs_i_gc_failures_write(inode, 0);
3277 if (f2fs_pin_file_control(inode, false)) {
3282 ret = f2fs_convert_inline_inode(inode);
3286 if (!f2fs_disable_compressed_file(inode)) {
3291 set_inode_flag(inode, FI_PIN_FILE);
3292 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3294 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3296 inode_unlock(inode);
3297 mnt_drop_write_file(filp);
3301 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3303 struct inode *inode = file_inode(filp);
3306 if (is_inode_flag_set(inode, FI_PIN_FILE))
3307 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3308 return put_user(pin, (u32 __user *)arg);
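/*
 * Example (userspace sketch): pinning a file so GC leaves its blocks in
 * place, then reading back the pin state, which reports the GC-failure
 * count for the pinned inode.
 *
 *	__u32 pin = 1, failures;
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin))
 *		perror("F2FS_IOC_SET_PIN_FILE");
 *	else if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures) == 0)
 *		printf("pinned; gc failures so far: %u\n", failures);
 */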
3311 int f2fs_precache_extents(struct inode *inode)
3313 struct f2fs_inode_info *fi = F2FS_I(inode);
3314 struct f2fs_map_blocks map;
3315 pgoff_t m_next_extent;
3319 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3324 map.m_next_pgofs = NULL;
3325 map.m_next_extent = &m_next_extent;
3326 map.m_seg_type = NO_CHECK_TYPE;
3327 map.m_may_create = false;
3328 end = F2FS_I_SB(inode)->max_file_blocks;
3330 while (map.m_lblk < end) {
3331 map.m_len = end - map.m_lblk;
3333 down_write(&fi->i_gc_rwsem[WRITE]);
3334 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3335 up_write(&fi->i_gc_rwsem[WRITE]);
3339 map.m_lblk = m_next_extent;
3345 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3347 return f2fs_precache_extents(file_inode(filp));
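/*
 * Example (userspace sketch): F2FS_IOC_PRECACHE_EXTENTS takes no
 * payload; it walks the file's mappings and warms the extent cache so
 * subsequent reads avoid dnode lookups.
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0))
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */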
3350 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3352 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3355 if (!capable(CAP_SYS_ADMIN))
3358 if (f2fs_readonly(sbi->sb))
3361 if (copy_from_user(&block_count, (void __user *)arg,
3362 sizeof(block_count)))
3365 return f2fs_resize_fs(filp, block_count);
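/*
 * Example (userspace sketch): resizing the filesystem to a new total
 * block count (f2fs blocks are 4 KiB, so 1 << 20 blocks is 4 GiB).
 * Requires CAP_SYS_ADMIN on a writable mount.
 *
 *	__u64 new_blocks = 1ULL << 20;
 *	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &new_blocks))
 *		perror("F2FS_IOC_RESIZE_FS");
 */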
3368 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3370 struct inode *inode = file_inode(filp);
3372 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3374 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3375 f2fs_warn(F2FS_I_SB(inode),
3376 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3381 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3384 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3386 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3389 return fsverity_ioctl_measure(filp, (void __user *)arg);
3392 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3394 struct inode *inode = file_inode(filp);
3395 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3400 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3404 down_read(&sbi->sb_lock);
3405 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3406 ARRAY_SIZE(sbi->raw_super->volume_name),
3407 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3408 up_read(&sbi->sb_lock);
3410 if (copy_to_user((char __user *)arg, vbuf,
3411 min(FSLABEL_MAX, count)))
3418 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3420 struct inode *inode = file_inode(filp);
3421 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3425 if (!capable(CAP_SYS_ADMIN))
3428 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3430 return PTR_ERR(vbuf);
3432 err = mnt_want_write_file(filp);
3436 down_write(&sbi->sb_lock);
3438 memset(sbi->raw_super->volume_name, 0,
3439 sizeof(sbi->raw_super->volume_name));
3440 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3441 sbi->raw_super->volume_name,
3442 ARRAY_SIZE(sbi->raw_super->volume_name));
3444 err = f2fs_commit_super(sbi, false);
3446 up_write(&sbi->sb_lock);
3448 mnt_drop_write_file(filp);
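/*
 * Example (userspace sketch): round-tripping the volume label with the
 * generic label ioctls from <linux/fs.h>. The label is stored on disk
 * as UTF-16 but exchanged here as UTF-8.
 *
 *	char label[FSLABEL_MAX] = {0};
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
 *		printf("label: %s\n", label);
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, "scratch"))
 *		perror("FS_IOC_SETFSLABEL");
 */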
3454 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3456 struct inode *inode = file_inode(filp);
3459 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3462 if (!f2fs_compressed_file(inode))
3465 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3466 return put_user(blocks, (u64 __user *)arg);
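/*
 * Example (userspace sketch): querying the per-inode i_compr_blocks
 * counter, i.e. how many blocks compression has saved for this file.
 *
 *	__u64 saved;
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &saved) == 0)
 *		printf("%llu blocks saved\n", (unsigned long long)saved);
 */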
3469 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3471 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3472 unsigned int released_blocks = 0;
3473 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3477 for (i = 0; i < count; i++) {
3478 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3479 dn->ofs_in_node + i);
3481 if (!__is_valid_data_blkaddr(blkaddr))
3483 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3484 DATA_GENERIC_ENHANCE)))
3485 return -EFSCORRUPTED;
3489 int compr_blocks = 0;
3491 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3492 blkaddr = f2fs_data_blkaddr(dn);
3495 if (blkaddr == COMPRESS_ADDR)
3497 dn->ofs_in_node += cluster_size;
3501 if (__is_valid_data_blkaddr(blkaddr))
3504 if (blkaddr != NEW_ADDR)
3507 dn->data_blkaddr = NULL_ADDR;
3508 f2fs_set_data_blkaddr(dn);
3511 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3512 dec_valid_block_count(sbi, dn->inode,
3513 cluster_size - compr_blocks);
3515 released_blocks += cluster_size - compr_blocks;
3517 count -= cluster_size;
3520 return released_blocks;
3523 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3525 struct inode *inode = file_inode(filp);
3526 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3527 pgoff_t page_idx = 0, last_idx;
3528 unsigned int released_blocks = 0;
3532 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3535 if (!f2fs_compressed_file(inode))
3538 if (f2fs_readonly(sbi->sb))
3541 ret = mnt_want_write_file(filp);
3545 f2fs_balance_fs(F2FS_I_SB(inode), true);
3549 writecount = atomic_read(&inode->i_writecount);
3550 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3551 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3556 if (IS_IMMUTABLE(inode)) {
3561 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3565 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3566 f2fs_set_inode_flags(inode);
3567 inode->i_ctime = current_time(inode);
3568 f2fs_mark_inode_dirty_sync(inode, true);
3570 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3573 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3574 down_write(&F2FS_I(inode)->i_mmap_sem);
3576 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3578 while (page_idx < last_idx) {
3579 struct dnode_of_data dn;
3580 pgoff_t end_offset, count;
3582 set_new_dnode(&dn, inode, NULL, NULL, 0);
3583 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3585 if (ret == -ENOENT) {
3586 page_idx = f2fs_get_next_page_offset(&dn,
3594 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3595 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3596 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3598 ret = release_compress_blocks(&dn, count);
3600 f2fs_put_dnode(&dn);
3606 released_blocks += ret;
3609 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3610 up_write(&F2FS_I(inode)->i_mmap_sem);
3612 inode_unlock(inode);
3614 mnt_drop_write_file(filp);
3617 ret = put_user(released_blocks, (u64 __user *)arg);
3618 } else if (released_blocks &&
3619 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3620 set_sbi_flag(sbi, SBI_NEED_FSCK);
3621 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3622 "iblocks=%llu, released=%u, compr_blocks=%u, "
3624 __func__, inode->i_ino, inode->i_blocks,
3626 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3632 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3634 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3635 unsigned int reserved_blocks = 0;
3636 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3640 for (i = 0; i < count; i++) {
3641 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3642 dn->ofs_in_node + i);
3644 if (!__is_valid_data_blkaddr(blkaddr))
3646 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3647 DATA_GENERIC_ENHANCE)))
3648 return -EFSCORRUPTED;
3652 int compr_blocks = 0;
3656 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3657 blkaddr = f2fs_data_blkaddr(dn);
3660 if (blkaddr == COMPRESS_ADDR)
3662 dn->ofs_in_node += cluster_size;
3667 * a compressed cluster may not have been released if
3668 * release_compress_blocks() failed on it, so NEW_ADDR
3669 * is a possible case.
3671 if (blkaddr == NEW_ADDR ||
3672 __is_valid_data_blkaddr(blkaddr)) {
3677 dn->data_blkaddr = NEW_ADDR;
3678 f2fs_set_data_blkaddr(dn);
3681 reserved = cluster_size - compr_blocks;
3683 /* for the case where all blocks in the cluster were already reserved */
3687 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3691 if (reserved != cluster_size - compr_blocks)
3694 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3696 reserved_blocks += reserved;
3698 count -= cluster_size;
3701 return reserved_blocks;
3704 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3706 struct inode *inode = file_inode(filp);
3707 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3708 pgoff_t page_idx = 0, last_idx;
3709 unsigned int reserved_blocks = 0;
3712 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3715 if (!f2fs_compressed_file(inode))
3718 if (f2fs_readonly(sbi->sb))
3721 ret = mnt_want_write_file(filp);
3725 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3728 f2fs_balance_fs(F2FS_I_SB(inode), true);
3732 if (!IS_IMMUTABLE(inode)) {
3737 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3738 down_write(&F2FS_I(inode)->i_mmap_sem);
3740 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3742 while (page_idx < last_idx) {
3743 struct dnode_of_data dn;
3744 pgoff_t end_offset, count;
3746 set_new_dnode(&dn, inode, NULL, NULL, 0);
3747 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3749 if (ret == -ENOENT) {
3750 page_idx = f2fs_get_next_page_offset(&dn,
3758 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3759 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3760 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3762 ret = reserve_compress_blocks(&dn, count);
3764 f2fs_put_dnode(&dn);
3770 reserved_blocks += ret;
3773 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3774 up_write(&F2FS_I(inode)->i_mmap_sem);
3777 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3778 f2fs_set_inode_flags(inode);
3779 inode->i_ctime = current_time(inode);
3780 f2fs_mark_inode_dirty_sync(inode, true);
3783 inode_unlock(inode);
3785 mnt_drop_write_file(filp);
3788 ret = put_user(reserved_blocks, (u64 __user *)arg);
3789 } else if (reserved_blocks &&
3790 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3791 set_sbi_flag(sbi, SBI_NEED_FSCK);
3792 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3793 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3795 __func__, inode->i_ino, inode->i_blocks,
3797 atomic_read(&F2FS_I(inode)->i_compr_blocks));
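/*
 * Example (userspace sketch): the release/reserve round trip. Releasing
 * returns the saved blocks to free space and leaves the file immutable;
 * reserving re-allocates them and clears the immutable flag so the file
 * becomes writable again.
 *
 *	__u64 blocks;
 *	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks) == 0)
 *		printf("released %llu blocks\n", (unsigned long long)blocks);
 *	// ... later, to make the file writable again ...
 *	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks) == 0)
 *		printf("reserved %llu blocks\n", (unsigned long long)blocks);
 */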
3803 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3804 pgoff_t off, block_t block, block_t len, u32 flags)
3806 struct request_queue *q = bdev_get_queue(bdev);
3807 sector_t sector = SECTOR_FROM_BLOCK(block);
3808 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3814 if (flags & F2FS_TRIM_FILE_DISCARD)
3815 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3816 blk_queue_secure_erase(q) ?
3817 BLKDEV_DISCARD_SECURE : 0);
3819 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3820 if (IS_ENCRYPTED(inode))
3821 ret = fscrypt_zeroout_range(inode, off, block, len);
3823 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3830 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3832 struct inode *inode = file_inode(filp);
3833 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3834 struct address_space *mapping = inode->i_mapping;
3835 struct block_device *prev_bdev = NULL;
3836 struct f2fs_sectrim_range range;
3837 pgoff_t index, pg_end, prev_index = 0;
3838 block_t prev_block = 0, len = 0;
3840 bool to_end = false;
3843 if (!(filp->f_mode & FMODE_WRITE))
3846 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3850 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3851 !S_ISREG(inode->i_mode))
3854 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3855 !f2fs_hw_support_discard(sbi)) ||
3856 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3857 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3860 file_start_write(filp);
3863 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3864 range.start >= inode->i_size) {
3872 if (inode->i_size - range.start > range.len) {
3873 end_addr = range.start + range.len;
3875 end_addr = range.len == (u64)-1 ?
3876 sbi->sb->s_maxbytes : inode->i_size;
3880 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3881 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3886 index = F2FS_BYTES_TO_BLK(range.start);
3887 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3889 ret = f2fs_convert_inline_inode(inode);
3893 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3894 down_write(&F2FS_I(inode)->i_mmap_sem);
3896 ret = filemap_write_and_wait_range(mapping, range.start,
3897 to_end ? LLONG_MAX : end_addr - 1);
3901 truncate_inode_pages_range(mapping, range.start,
3902 to_end ? -1 : end_addr - 1);
3904 while (index < pg_end) {
3905 struct dnode_of_data dn;
3906 pgoff_t end_offset, count;
3909 set_new_dnode(&dn, inode, NULL, NULL, 0);
3910 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3912 if (ret == -ENOENT) {
3913 index = f2fs_get_next_page_offset(&dn, index);
3919 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3920 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3921 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3922 struct block_device *cur_bdev;
3923 block_t blkaddr = f2fs_data_blkaddr(&dn);
3925 if (!__is_valid_data_blkaddr(blkaddr))
3928 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3929 DATA_GENERIC_ENHANCE)) {
3930 ret = -EFSCORRUPTED;
3931 f2fs_put_dnode(&dn);
3935 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3936 if (f2fs_is_multi_device(sbi)) {
3937 int di = f2fs_target_device_index(sbi, blkaddr);
3939 blkaddr -= FDEV(di).start_blk;
3943 if (prev_bdev == cur_bdev &&
3944 index == prev_index + len &&
3945 blkaddr == prev_block + len) {
3948 ret = f2fs_secure_erase(prev_bdev,
3949 inode, prev_index, prev_block,
3952 f2fs_put_dnode(&dn);
3961 prev_bdev = cur_bdev;
3963 prev_block = blkaddr;
3968 f2fs_put_dnode(&dn);
3970 if (fatal_signal_pending(current)) {
3978 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3979 prev_block, len, range.flags);
3981 up_write(&F2FS_I(inode)->i_mmap_sem);
3982 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3984 inode_unlock(inode);
3985 file_end_write(filp);
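/*
 * Example (userspace sketch): securely discarding and then zeroing a
 * whole file. start/len are in bytes, len == (u64)-1 means "to EOF",
 * and flags combine F2FS_TRIM_FILE_DISCARD and F2FS_TRIM_FILE_ZEROOUT.
 *
 *	struct f2fs_sectrim_range tr = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &tr))
 *		perror("F2FS_IOC_SEC_TRIM_FILE");
 */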
3990 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3993 case FS_IOC_GETFLAGS:
3994 return f2fs_ioc_getflags(filp, arg);
3995 case FS_IOC_SETFLAGS:
3996 return f2fs_ioc_setflags(filp, arg);
3997 case FS_IOC_GETVERSION:
3998 return f2fs_ioc_getversion(filp, arg);
3999 case F2FS_IOC_START_ATOMIC_WRITE:
4000 return f2fs_ioc_start_atomic_write(filp);
4001 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4002 return f2fs_ioc_commit_atomic_write(filp);
4003 case F2FS_IOC_START_VOLATILE_WRITE:
4004 return f2fs_ioc_start_volatile_write(filp);
4005 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4006 return f2fs_ioc_release_volatile_write(filp);
4007 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4008 return f2fs_ioc_abort_volatile_write(filp);
4009 case F2FS_IOC_SHUTDOWN:
4010 return f2fs_ioc_shutdown(filp, arg);
4012 return f2fs_ioc_fitrim(filp, arg);
4013 case FS_IOC_SET_ENCRYPTION_POLICY:
4014 return f2fs_ioc_set_encryption_policy(filp, arg);
4015 case FS_IOC_GET_ENCRYPTION_POLICY:
4016 return f2fs_ioc_get_encryption_policy(filp, arg);
4017 case FS_IOC_GET_ENCRYPTION_PWSALT:
4018 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4019 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4020 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4021 case FS_IOC_ADD_ENCRYPTION_KEY:
4022 return f2fs_ioc_add_encryption_key(filp, arg);
4023 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4024 return f2fs_ioc_remove_encryption_key(filp, arg);
4025 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4026 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4027 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4028 return f2fs_ioc_get_encryption_key_status(filp, arg);
4029 case FS_IOC_GET_ENCRYPTION_NONCE:
4030 return f2fs_ioc_get_encryption_nonce(filp, arg);
4031 case F2FS_IOC_GARBAGE_COLLECT:
4032 return f2fs_ioc_gc(filp, arg);
4033 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4034 return f2fs_ioc_gc_range(filp, arg);
4035 case F2FS_IOC_WRITE_CHECKPOINT:
4036 return f2fs_ioc_write_checkpoint(filp, arg);
4037 case F2FS_IOC_DEFRAGMENT:
4038 return f2fs_ioc_defragment(filp, arg);
4039 case F2FS_IOC_MOVE_RANGE:
4040 return f2fs_ioc_move_range(filp, arg);
4041 case F2FS_IOC_FLUSH_DEVICE:
4042 return f2fs_ioc_flush_device(filp, arg);
4043 case F2FS_IOC_GET_FEATURES:
4044 return f2fs_ioc_get_features(filp, arg);
4045 case FS_IOC_FSGETXATTR:
4046 return f2fs_ioc_fsgetxattr(filp, arg);
4047 case FS_IOC_FSSETXATTR:
4048 return f2fs_ioc_fssetxattr(filp, arg);
4049 case F2FS_IOC_GET_PIN_FILE:
4050 return f2fs_ioc_get_pin_file(filp, arg);
4051 case F2FS_IOC_SET_PIN_FILE:
4052 return f2fs_ioc_set_pin_file(filp, arg);
4053 case F2FS_IOC_PRECACHE_EXTENTS:
4054 return f2fs_ioc_precache_extents(filp, arg);
4055 case F2FS_IOC_RESIZE_FS:
4056 return f2fs_ioc_resize_fs(filp, arg);
4057 case FS_IOC_ENABLE_VERITY:
4058 return f2fs_ioc_enable_verity(filp, arg);
4059 case FS_IOC_MEASURE_VERITY:
4060 return f2fs_ioc_measure_verity(filp, arg);
4061 case FS_IOC_GETFSLABEL:
4062 return f2fs_ioc_getfslabel(filp, arg);
4063 case FS_IOC_SETFSLABEL:
4064 return f2fs_ioc_setfslabel(filp, arg);
4065 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4066 return f2fs_get_compress_blocks(filp, arg);
4067 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4068 return f2fs_release_compress_blocks(filp, arg);
4069 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4070 return f2fs_reserve_compress_blocks(filp, arg);
4071 case F2FS_IOC_SEC_TRIM_FILE:
4072 return f2fs_sec_trim_file(filp, arg);
4078 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4080 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4082 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4085 return __f2fs_ioctl(filp, cmd, arg);
4088 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4090 struct file *file = iocb->ki_filp;
4091 struct inode *inode = file_inode(file);
4094 if (!f2fs_is_compress_backend_ready(inode))
4097 ret = generic_file_read_iter(iocb, iter);
4100 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4105 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4107 struct file *file = iocb->ki_filp;
4108 struct inode *inode = file_inode(file);
4111 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4116 if (!f2fs_is_compress_backend_ready(inode)) {
4121 if (iocb->ki_flags & IOCB_NOWAIT) {
4122 if (!inode_trylock(inode)) {
4130 if (unlikely(IS_IMMUTABLE(inode))) {
4135 ret = generic_write_checks(iocb, from);
4137 bool preallocated = false;
4138 size_t target_size = 0;
4141 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4142 set_inode_flag(inode, FI_NO_PREALLOC);
4144 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4145 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4146 iov_iter_count(from)) ||
4147 f2fs_has_inline_data(inode) ||
4148 f2fs_force_buffered_io(inode, iocb, from)) {
4149 clear_inode_flag(inode, FI_NO_PREALLOC);
4150 inode_unlock(inode);
4157 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4160 if (iocb->ki_flags & IOCB_DIRECT) {
4162 * Convert inline data for Direct I/O before entering f2fs_direct_IO().
4165 err = f2fs_convert_inline_inode(inode);
4169 * If f2fs_force_buffered_io() is true, we have to allocate
4170 * blocks all the time, since f2fs_direct_IO() will fall
4171 * back to buffered I/O.
4173 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4174 allow_outplace_dio(inode, iocb, from))
4177 preallocated = true;
4178 target_size = iocb->ki_pos + iov_iter_count(from);
4180 err = f2fs_preallocate_blocks(iocb, from);
4183 clear_inode_flag(inode, FI_NO_PREALLOC);
4184 inode_unlock(inode);
4189 ret = __generic_file_write_iter(iocb, from);
4190 clear_inode_flag(inode, FI_NO_PREALLOC);
4192 /* if we couldn't write data, we should deallocate blocks. */
4193 if (preallocated && i_size_read(inode) < target_size)
4194 f2fs_truncate(inode);
4197 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4200 inode_unlock(inode);
4202 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4203 iov_iter_count(from), ret);
4205 ret = generic_write_sync(iocb, ret);
4209 #ifdef CONFIG_COMPAT
4210 struct compat_f2fs_gc_range {
4215 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4216 struct compat_f2fs_gc_range)
4218 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4220 struct compat_f2fs_gc_range __user *urange;
4221 struct f2fs_gc_range range;
4224 urange = compat_ptr(arg);
4225 err = get_user(range.sync, &urange->sync);
4226 err |= get_user(range.start, &urange->start);
4227 err |= get_user(range.len, &urange->len);
4231 return __f2fs_ioc_gc_range(file, &range);
4234 struct compat_f2fs_move_range {
4240 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4241 struct compat_f2fs_move_range)
4243 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4245 struct compat_f2fs_move_range __user *urange;
4246 struct f2fs_move_range range;
4249 urange = compat_ptr(arg);
4250 err = get_user(range.dst_fd, &urange->dst_fd);
4251 err |= get_user(range.pos_in, &urange->pos_in);
4252 err |= get_user(range.pos_out, &urange->pos_out);
4253 err |= get_user(range.len, &urange->len);
4257 return __f2fs_ioc_move_range(file, &range);
4260 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4262 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4264 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4268 case FS_IOC32_GETFLAGS:
4269 cmd = FS_IOC_GETFLAGS;
4271 case FS_IOC32_SETFLAGS:
4272 cmd = FS_IOC_SETFLAGS;
4274 case FS_IOC32_GETVERSION:
4275 cmd = FS_IOC_GETVERSION;
4277 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4278 return f2fs_compat_ioc_gc_range(file, arg);
4279 case F2FS_IOC32_MOVE_RANGE:
4280 return f2fs_compat_ioc_move_range(file, arg);
4281 case F2FS_IOC_START_ATOMIC_WRITE:
4282 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4283 case F2FS_IOC_START_VOLATILE_WRITE:
4284 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4285 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4286 case F2FS_IOC_SHUTDOWN:
4288 case FS_IOC_SET_ENCRYPTION_POLICY:
4289 case FS_IOC_GET_ENCRYPTION_PWSALT:
4290 case FS_IOC_GET_ENCRYPTION_POLICY:
4291 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4292 case FS_IOC_ADD_ENCRYPTION_KEY:
4293 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4294 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4295 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4296 case FS_IOC_GET_ENCRYPTION_NONCE:
4297 case F2FS_IOC_GARBAGE_COLLECT:
4298 case F2FS_IOC_WRITE_CHECKPOINT:
4299 case F2FS_IOC_DEFRAGMENT:
4300 case F2FS_IOC_FLUSH_DEVICE:
4301 case F2FS_IOC_GET_FEATURES:
4302 case FS_IOC_FSGETXATTR:
4303 case FS_IOC_FSSETXATTR:
4304 case F2FS_IOC_GET_PIN_FILE:
4305 case F2FS_IOC_SET_PIN_FILE:
4306 case F2FS_IOC_PRECACHE_EXTENTS:
4307 case F2FS_IOC_RESIZE_FS:
4308 case FS_IOC_ENABLE_VERITY:
4309 case FS_IOC_MEASURE_VERITY:
4310 case FS_IOC_GETFSLABEL:
4311 case FS_IOC_SETFSLABEL:
4312 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4313 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4314 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4315 case F2FS_IOC_SEC_TRIM_FILE:
4318 return -ENOIOCTLCMD;
4320 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4324 const struct file_operations f2fs_file_operations = {
4325 .llseek = f2fs_llseek,
4326 .read_iter = f2fs_file_read_iter,
4327 .write_iter = f2fs_file_write_iter,
4328 .open = f2fs_file_open,
4329 .release = f2fs_release_file,
4330 .mmap = f2fs_file_mmap,
4331 .flush = f2fs_file_flush,
4332 .fsync = f2fs_sync_file,
4333 .fallocate = f2fs_fallocate,
4334 .unlocked_ioctl = f2fs_ioctl,
4335 #ifdef CONFIG_COMPAT
4336 .compat_ioctl = f2fs_compat_ioctl,
4338 .splice_read = generic_file_splice_read,
4339 .splice_write = iter_file_splice_write,