// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
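
/*
 * mmap read path: let the generic filemap code service the fault, and
 * account one mapped read against the per-app iostat counters when the
 * page was actually brought in (VM_FAULT_LOCKED).
 */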
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_flags_t flags = vmf->vma->vm_flags;
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), inode,
					APP_MAPPED_READ_IO, F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);

	return ret;
}
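
/*
 * mmap write path: make a faulted page writable. This reserves the
 * backing block (converting an inline inode first when needed), waits
 * for any writeback on the page and its GCed block, zeroes the part of
 * the last page beyond i_size, and finally dirties the page.
 */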
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {

	if (unlikely(f2fs_cp_error(sbi))) {

	if (!f2fs_is_checkpoint_ready(sbi)) {

	err = f2fs_convert_inline_inode(inode);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

	/* this should be done outside of any locked page */
	f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);

	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block_locked(&dn, page->index);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);

	set_page_dirty(page);

	f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);

	ret = vmf_fs_error(err);

	trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);

	*pino = parent_ino(dentry);
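
/*
 * Decide whether this fsync() can be served by writing the inode's node
 * chain alone (roll-forward recoverable) or whether a full checkpoint
 * is required; the returned cp_reason_type records why.
 */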
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);

	/* But we still need to detect any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))

static void try_to_fix_pino(struct inode *inode)
	struct f2fs_inode_info *fi = F2FS_I(inode);

	f2fs_down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	f2fs_up_write(&fi->i_sem);
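
/*
 * Core of fsync()/fdatasync(): flush the dirty data pages, then either
 * run a full checkpoint (when need_do_checkpoint() demands one) or
 * write the node chain needed for roll-forward recovery, and finally
 * issue a cache flush unless barriers are disabled.
 */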
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))

	/*
	 * In the OPU case, the node can be persisted before the data during
	 * fsync() when the lower device doesn't support write barriers,
	 * resulting in data corruption after a sudden power-off (SPO).
	 * So in strict fsync mode, force atomic write semantics to keep the
	 * write order between data/node and the last node, to avoid
	 * potential data corruption.
	 */
	if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)

	/*
	 * Both fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	f2fs_down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);

	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * none of the node blocks in the last fsync.
	 */
	ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);

	if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
	    (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
		ret = f2fs_issue_flush(sbi, inode->i_ino);

		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);

	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))

	return f2fs_do_sync_file(file, start, end, datasync, false);
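
/*
 * llseek(SEEK_DATA/SEEK_HOLE) helper: an offset counts as data when it
 * has a valid block address, or when it is a preallocated (NEW_ADDR)
 * block whose page is dirty in the page cache; NULL_ADDR is a hole.
 */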
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
		if (__is_valid_data_blkaddr(blkaddr))
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
		if (blkaddr == NULL_ADDR)

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;

	inode_lock_shared(inode);

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
		} else if (whence == SEEK_DATA) {

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {

			if (__found_offset(file->f_mapping, blkaddr,

	if (whence == SEEK_DATA)

	if (whence == SEEK_HOLE && data_ofs > isize)
	inode_unlock_shared(inode);
	return vfs_setpos(file, data_ofs, maxbytes);

	inode_unlock_shared(inode);

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

		return f2fs_seek_block(file, offset, whence);

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!f2fs_is_compress_backend_ready(inode))

	vma->vm_ops = &f2fs_file_vm_ops;

	f2fs_down_read(&F2FS_I(inode)->i_sem);
	set_inode_flag(inode, FI_MMAP_FILE);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

static int f2fs_file_open(struct inode *inode, struct file *filp)
	int err = fscrypt_file_open(inode, filp);

	if (!f2fs_is_compress_backend_ready(inode))

	err = fsverity_file_open(inode, filp);

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	filp->f_mode |= FMODE_CAN_ODIRECT;

	return dquot_file_open(inode, filp);
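
/*
 * Free up to @count block addresses starting at dn->ofs_in_node within
 * one dnode block: invalidate the blocks, clear the slots to NULL_ADDR,
 * keep the per-inode compressed block count in sync for compressed
 * clusters, and shrink the read/age extent caches over the range.
 */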
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;

	/* Assumption: truncation starts with the cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);

		if (blkaddr == NULL_ADDR)

		f2fs_set_data_blkaddr(dn, NULL_ADDR);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
			if (compressed_cluster)

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	/*
	 * once we invalidate valid blkaddr in range [ofs, ofs + count],
	 * we will invalidate all blkaddr in the whole range.
	 */
	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
	f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
	f2fs_update_age_extent_cache_range(dn, fofs, len);
	dec_valid_block_count(sbi, dn->inode, nr_free);

	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);

static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;

	if (!offset && !cache_only)

	page = find_lock_page(mapping, index);
	if (page && PageUptodate(page))
	f2fs_put_page(page, 1);

	page = f2fs_get_lock_data_page(inode, index, true);
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have its key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	set_page_dirty(page);
	f2fs_put_page(page, 1);
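
/*
 * Truncate all blocks past @from: drop inline data or walk the dnode
 * tree to free the data blocks and the node blocks indexing them, then
 * zero out the tail of the now-partial last page.
 */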
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int count = 0, err = 0;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
		err = PTR_ERR(ipage);

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);

	err = f2fs_truncate_inode_blocks(inode, free_from);

	/* lastly zero out the first data page */
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
	u64 free_from = from;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned truncation is
	 * supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);

	err = f2fs_do_truncate_blocks(inode, free_from, lock);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct writes are not allowed after its
	 * compressed blocks have been released, but they should be allowed
	 * again once the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);

int f2fs_truncate(struct inode *inode)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))

	err = f2fs_dquot_initialize(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	f2fs_mark_inode_dirty_sync(inode, false);
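
/*
 * Direct I/O has to fall back to buffered I/O whenever the crypto,
 * verity or compression state, a multi-device layout with unaligned
 * block sizes, zoned/LFS-mode write constraints, or a disabled
 * checkpoint make direct submission unsafe or unsupported.
 */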
static bool f2fs_force_buffered_io(struct inode *inode, int rw)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!fscrypt_dio_supported(inode))
	if (fsverity_active(inode))
	if (f2fs_compressed_file(inode))

	/* disallow direct IO if any of the devices has an unaligned blksize */
	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
	/*
	 * for a zoned block device, fall back from direct IO to buffered IO,
	 * so that all IOs can be serialized by the log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
	if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))

int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = NULL;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;

	/*
	 * Return the DIO alignment restrictions if requested. We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 *
	 * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
	 * cannot represent that, so in that case we report no DIO support.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		unsigned int bsize = i_blocksize(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (!f2fs_force_buffered_io(inode, WRITE)) {
			stat->dio_mem_align = bsize;
			stat->dio_offset_align = bsize;

	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(idmap, request_mask, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct mnt_idmap *idmap,
			   struct inode *inode, const struct iattr *attr)
	unsigned int ia_valid = attr->ia_valid;

	i_uid_update(idmap, attr, inode);
	i_gid_update(idmap, attr, inode);
	if (ia_valid & ATTR_ATIME)
		inode_set_atime_to_ts(inode, attr->ia_atime);
	if (ia_valid & ATTR_MTIME)
		inode_set_mtime_to_ts(inode, attr->ia_mtime);
	if (ia_valid & ATTR_CTIME)
		inode_set_ctime_to_ts(inode, attr->ia_ctime);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

		if (!vfsgid_in_group_p(vfsgid) &&
		    !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
		set_acl_inode(inode, mode);
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
	struct inode *inode = d_inode(dentry);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (unlikely(IS_IMMUTABLE(inode)))

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				ATTR_GID | ATTR_TIMES_SET))))

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))

	err = setattr_prepare(idmap, dentry, attr);

	err = fscrypt_prepare_setattr(dentry, attr);

	err = fsverity_prepare_setattr(dentry, attr);

	if (is_quota_modification(idmap, inode, attr)) {
		err = f2fs_dquot_initialize(inode);

	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(idmap, inode, attr);
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		i_uid_update(idmap, attr, inode);
		i_gid_update(idmap, attr, inode);
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write, so
			 * that an inode carrying the inline flag never
			 * exceeds the inline_data size.
			 */
			err = f2fs_convert_inline_inode(inode);

		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);

	__setattr_copy(idmap, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_inode_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};
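
/*
 * fill_zero() zeroes the sub-page range [start, start + len) by
 * grabbing (or allocating) the data page and dirtying it; used by hole
 * punching and zero range for the unaligned head/tail of a request.
 */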
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	f2fs_balance_fs(sbi, true);

	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;

	ret = f2fs_convert_inline_inode(inode);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			ret = fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
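
/*
 * Block-exchange machinery shared by collapse/insert range:
 * __read_out_blkaddrs() snapshots the source block addresses (detaching
 * non-checkpointed ones), __roll_back_blkaddrs() restores them on
 * failure, and __clone_blkaddrs() re-links or copies the blocks at the
 * destination offsets.
 */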
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
					dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);

	f2fs_put_dnode(&dn);

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		dec_valid_block_count(sbi, inode, 1);
		f2fs_invalidate_blocks(sbi, *blkaddr);
		f2fs_update_data_blkaddr(&dn, *blkaddr);
		f2fs_put_dnode(&dn);

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);

		if (blkaddr[i] == NULL_ADDR && !full) {

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);

			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
				f2fs_put_dnode(&dn);

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
					f2fs_i_blocks_write(dst_inode,
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
						blkaddr[i], ni.version, true, false);

				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);

			memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
			set_page_dirty(pdst);
			set_page_private_gcing(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
	block_t *src_blkaddr;

	olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

	src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(block_t)),

	do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(int)),
		kvfree(src_blkaddr);

	ret = __read_out_blkaddrs(src_inode, src_blkaddr,
				do_replace, src, olen);

	ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
				do_replace, src, dst, olen, full);

	kvfree(src_blkaddr);

	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
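
/*
 * Collapse range: shift every block beyond the removed area toward the
 * file start via __exchange_data_block() with GC excluded and the page
 * cache truncated, then cut off the now-shorter tail.
 */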
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
	if (offset + len >= i_size_read(inode))

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	ret = f2fs_do_collapse(inode, offset, len);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(inode->i_mapping);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	filemap_invalidate_unlock(inode->i_mapping);
		f2fs_i_size_write(inode, new_size);
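
/*
 * Zero range core: reserve new blocks for the holes in [start, end),
 * invalidate any existing blocks, and set the slots to NEW_ADDR so that
 * subsequent reads see zeroes without touching the old data.
 */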
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {

		if (dn->data_blkaddr == NEW_ADDR)

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		f2fs_set_data_blkaddr(dn, NEW_ADDR);

	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
	f2fs_update_age_extent_cache_range(dn, start, index - start);

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		new_size = max_t(loff_t, new_size, offset + len);
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(mapping);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
				f2fs_unlock_op(sbi);
				filemap_invalidate_unlock(mapping);
				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			filemap_invalidate_unlock(mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);

			ret = fill_zero(inode, pg_end, 0, off_end);

			new_size = max_t(loff_t, new_size, offset + len);

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t nr, pg_start, pg_end, delta, idx;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);

	if (offset >= i_size_read(inode))

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	filemap_invalidate_lock(mapping);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	filemap_invalidate_unlock(mapping);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(mapping);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;

		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(mapping);
	filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	filemap_invalidate_unlock(mapping);

		f2fs_i_size_write(inode, new_size);
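
/*
 * Preallocation (plain fallocate): map new blocks across the requested
 * range. Pinned files are special-cased: they get one contiguous
 * section at a time from CURSEG_COLD_DATA_PINNED, with foreground GC
 * run first when free sections are short.
 */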
static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.nr_free_secs = 0 };
	pgoff_t pg_start, pg_end;
	block_t expanded = 0;

	err = inode_newsize_ok(inode, (len + offset));

	err = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;

		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			f2fs_down_write(&sbi->gc_lock);
			stat_inc_gc_call_count(sbi, FOREGROUND);
			err = f2fs_gc(sbi, &gc_control);
			if (err && err != -ENODATA)

		f2fs_down_write(&sbi->pin_sem);

		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
		file_dont_truncate(inode);

		f2fs_up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
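
/*
 * fallocate() dispatcher: validate the requested mode against the file
 * state (no destructive modes on pinned or compressed files, no
 * collapse/insert on encrypted files) and hand off to a helper above.
 */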
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
	if (!f2fs_is_compress_backend_ready(inode))

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))

	/*
	 * Pinned files do not support partial truncation, since their blocks
	 * may be in use by applications.
	 */
	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))

	ret = file_modified(file);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)

		ret = f2fs_punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = f2fs_expand_inode_data(inode, offset, len, mode);
	}

		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);

static int f2fs_release_file(struct inode *inode, struct file *filp)
	/*
	 * f2fs_release_file() is called on every close. So we must not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)

	f2fs_abort_atomic_write(inode, true);
	inode_unlock(inode);

static int f2fs_file_flush(struct file *file, fl_owner_t id)
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers could see a corrupted database
	 * until all the writers close their files. Since this should be done
	 * before the file lock is dropped, it has to happen in ->flush.
	 */
	if (F2FS_I(inode)->atomic_write_task == current &&
			(current->flags & PF_EXITING)) {
		f2fs_abort_atomic_write(inode, true);
		inode_unlock(inode);
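
/*
 * Apply a new set of f2fs i_flags (pre-masked to the settable set),
 * enforcing the invariants: casefold may only change on empty
 * directories with the feature enabled, and compression may only be
 * toggled while the file has no data blocks.
 */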
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
		if (!f2fs_empty_dir(inode))

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
		} else {
			/* try to convert inline_data to support compression */
			int err = f2fs_convert_inline_inode(inode);

			f2fs_down_write(&F2FS_I(inode)->i_sem);
			if (!f2fs_may_compress(inode) ||
					(S_ISREG(inode->i_mode) &&
					F2FS_HAS_BLOCKS(inode))) {
				f2fs_up_write(&F2FS_I(inode)->i_sem);
			err = set_compress_context(inode);
			f2fs_up_write(&F2FS_I(inode)->i_sem);

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode_set_ctime_current(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
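
/*
 * F2FS_IOC_START_ATOMIC_WRITE begins an atomic-write transaction on a
 * regular file: updates are staged in a per-inode COW inode and only
 * reach the original file on F2FS_IOC_COMMIT_ATOMIC_WRITE
 * (F2FS_IOC_ABORT_ATOMIC_WRITE discards them). A minimal userspace
 * sketch, assuming a writable fd on an f2fs file and omitting error
 * handling:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);	// staged in the COW inode
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */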
static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inode *pinode;

	if (!inode_owner_or_capable(idmap, inode))

	if (!S_ISREG(inode->i_mode))

	if (filp->f_flags & O_DIRECT)

	ret = mnt_want_write_file(filp);

	if (!f2fs_disable_compressed_file(inode)) {

	if (f2fs_is_atomic_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);

	/*
	 * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly by
	 * f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	/* Check if the inode already has a COW inode */
	if (fi->cow_inode == NULL) {
		/* Create a COW inode for atomic write */
		pinode = f2fs_iget(inode->i_sb, fi->i_pino);
		if (IS_ERR(pinode)) {
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			ret = PTR_ERR(pinode);

		ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

		set_inode_flag(fi->cow_inode, FI_COW_FILE);
		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
	} else {
		/* Reuse the already created COW inode */
		ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_write_inode(inode, NULL);

	stat_inc_atomic_inode(inode);

	set_inode_flag(inode, FI_ATOMIC_FILE);

	isize = i_size_read(inode);
	fi->original_i_size = isize;
	if (truncate) {
		set_inode_flag(inode, FI_ATOMIC_REPLACE);
		truncate_inode_pages_final(inode->i_mapping);
		f2fs_i_size_write(inode, 0);
		isize = 0;
	}
	f2fs_i_size_write(fi->cow_inode, isize);

	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_update_time(sbi, REQ_TIME);
	fi->atomic_write_task = current;
	stat_update_max_atomic_write(inode);
	fi->atomic_write_cnt = 0;

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_commit_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);

	if (!inode_owner_or_capable(idmap, inode))

	ret = mnt_want_write_file(filp);

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_atomic_write(inode);
		if (!ret)
			ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

		f2fs_abort_atomic_write(inode, ret);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_abort_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);

	if (!inode_owner_or_capable(idmap, inode))

	ret = mnt_want_write_file(filp);

	f2fs_abort_atomic_write(inode, true);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
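
/*
 * F2FS_IOC_SHUTDOWN: bring the filesystem down to a chosen degree, from
 * a frozen full sync (F2FS_GOING_DOWN_FULLSYNC) through checkpoint-only
 * and meta-flush variants to an immediate stop without any sync
 * (F2FS_GOING_DOWN_NOSYNC); the GC and discard threads are stopped in
 * every case.
 */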
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(in, (__u32 __user *)arg))

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
			if (ret == -EROFS) {
				f2fs_stop_checkpoint(sbi, false,
						STOP_CP_REASON_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = bdev_freeze(sb->s_bdev);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		bdev_thaw(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);

	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct fstrim_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,

	ret = mnt_want_write_file(filp);

	range.minlen = max((unsigned int)range.minlen,
			   bdev_discard_granularity(sb->s_bdev));
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);

	if (copy_to_user((struct fstrim_range __user *)arg, &range,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

static bool uuid_is_nonzero(__u8 u[16])
	for (i = 0; i < 16; i++)

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	u8 encrypt_pw_salt[16];

	if (!f2fs_sb_has_encrypt(sbi))

	err = mnt_want_write_file(filp);

	f2fs_down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);

	memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);

	f2fs_up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);

	if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.should_migrate_blocks = false,
			.nr_free_secs = 0 };

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(sync, (__u32 __user *)arg))

	if (f2fs_readonly(sbi->sb))

	ret = mnt_want_write_file(filp);

	if (!sync) {
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.init_gc_type = sync ? FG_GC : BG_GC;
	gc_control.err_gc_skipped = sync;
	stat_inc_gc_call_count(sbi, FOREGROUND);
	ret = f2fs_gc(sbi, &gc_control);

	mnt_drop_write_file(filp);

static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	struct f2fs_gc_control gc_control = {
			.init_gc_type = range->sync ? FG_GC : BG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = range->sync,
			.nr_free_secs = 0 };

	if (!capable(CAP_SYS_ADMIN))
	if (f2fs_readonly(sbi->sb))

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))

	ret = mnt_want_write_file(filp);

	if (!range->sync) {
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.victim_segno = GET_SEGNO(sbi, range->start);
	stat_inc_gc_call_count(sbi, FOREGROUND);
	ret = f2fs_gc(sbi, &gc_control);

	range->start += CAP_BLKS_PER_SEC(sbi);
	if (range->start <= end)

	mnt_drop_write_file(filp);

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,

	return __f2fs_ioc_gc_range(filp, &range);

static int f2fs_ioc_write_checkpoint(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");

	ret = mnt_want_write_file(filp);

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
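
/*
 * Defragmentation: scan the range for non-contiguous extents, then
 * fault the affected pages in, dirty them, and write them back so the
 * allocator places them contiguously (OPU); writeback is issued in
 * chunks of at most one segment.
 */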
2572 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2574 struct f2fs_defragment *range)
2576 struct inode *inode = file_inode(filp);
2577 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2578 .m_seg_type = NO_CHECK_TYPE,
2579 .m_may_create = false };
2580 struct extent_info ei = {};
2581 pgoff_t pg_start, pg_end, next_pgofs;
2582 unsigned int blk_per_seg = sbi->blocks_per_seg;
2583 unsigned int total = 0, sec_num;
2584 block_t blk_end = 0;
2585 bool fragmented = false;
2588 pg_start = range->start >> PAGE_SHIFT;
2589 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2591 f2fs_balance_fs(sbi, true);
2595 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
2600 /* if in-place-update policy is enabled, don't waste time here */
2601 set_inode_flag(inode, FI_OPU_WRITE);
2602 if (f2fs_should_update_inplace(inode, NULL)) {
2607 /* writeback all dirty pages in the range */
2608 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2609 range->start + range->len - 1);
2614 * lookup mapping info in extent cache, skip defragmenting if physical
2615 * block addresses are continuous.
2617 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2618 if (ei.fofs + ei.len >= pg_end)
2622 map.m_lblk = pg_start;
2623 map.m_next_pgofs = &next_pgofs;
2626 * look up mapping info in the dnode page cache; skip defragmenting if
2627 * all physical block addresses are contiguous, even if there are holes
2628 * in the logical blocks.
2630 while (map.m_lblk < pg_end) {
2631 map.m_len = pg_end - map.m_lblk;
2632 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2636 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2637 map.m_lblk = next_pgofs;
2641 if (blk_end && blk_end != map.m_pblk)
2644 /* record the total count of blocks that we're going to move */
2647 blk_end = map.m_pblk + map.m_len;
2649 map.m_lblk += map.m_len;
2657 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2660 * make sure there are enough free sections for LFS allocation; this
2661 * avoids running defragment in SSR mode when free sections are allocated
2664 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2669 map.m_lblk = pg_start;
2670 map.m_len = pg_end - pg_start;
2673 while (map.m_lblk < pg_end) {
2678 map.m_len = pg_end - map.m_lblk;
2679 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2683 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2684 map.m_lblk = next_pgofs;
2688 set_inode_flag(inode, FI_SKIP_WRITES);
2691 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2694 page = f2fs_get_lock_data_page(inode, idx, true);
2696 err = PTR_ERR(page);
2700 set_page_dirty(page);
2701 set_page_private_gcing(page);
2702 f2fs_put_page(page, 1);
2711 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2714 clear_inode_flag(inode, FI_SKIP_WRITES);
2716 err = filemap_fdatawrite(inode->i_mapping);
2721 clear_inode_flag(inode, FI_SKIP_WRITES);
2723 clear_inode_flag(inode, FI_OPU_WRITE);
2725 inode_unlock(inode);
2727 range->len = (u64)total << PAGE_SHIFT;
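/*
 * Editor's note: f2fs_defragment_range() is a two-pass operation. The first
 * pass walks the mapping to decide whether the range is fragmented at all;
 * the second pass redirties the affected pages with FI_OPU_WRITE set so
 * that writeback relocates them out of place, one segment's worth at a
 * time. On return, range->len reports the bytes actually queued to move.
 */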
2731 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2733 struct inode *inode = file_inode(filp);
2734 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2735 struct f2fs_defragment range;
2738 if (!capable(CAP_SYS_ADMIN))
2741 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2744 if (f2fs_readonly(sbi->sb))
2747 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2751 /* verify alignment of offset & size */
2752 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2755 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2756 max_file_blocks(inode)))
2759 err = mnt_want_write_file(filp);
2763 err = f2fs_defragment_range(sbi, filp, &range);
2764 mnt_drop_write_file(filp);
2766 f2fs_update_time(sbi, REQ_TIME);
2770 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2777 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2778 struct file *file_out, loff_t pos_out, size_t len)
2780 struct inode *src = file_inode(file_in);
2781 struct inode *dst = file_inode(file_out);
2782 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2783 size_t olen = len, dst_max_i_size = 0;
2787 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2788 src->i_sb != dst->i_sb)
2791 if (unlikely(f2fs_readonly(src->i_sb)))
2794 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2797 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2800 if (pos_out < 0 || pos_in < 0)
2804 if (pos_in == pos_out)
2806 if (pos_out > pos_in && pos_out < pos_in + len)
2813 if (!inode_trylock(dst))
2817 if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2823 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2826 olen = len = src->i_size - pos_in;
2827 if (pos_in + len == src->i_size)
2828 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2834 dst_osize = dst->i_size;
2835 if (pos_out + olen > dst->i_size)
2836 dst_max_i_size = pos_out + olen;
2838 /* verify the end result is block aligned */
2839 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2840 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2841 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2844 ret = f2fs_convert_inline_inode(src);
2848 ret = f2fs_convert_inline_inode(dst);
2852 /* write out all dirty pages from offset */
2853 ret = filemap_write_and_wait_range(src->i_mapping,
2854 pos_in, pos_in + len);
2858 ret = filemap_write_and_wait_range(dst->i_mapping,
2859 pos_out, pos_out + len);
2863 f2fs_balance_fs(sbi, true);
2865 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2868 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2873 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2874 pos_out >> F2FS_BLKSIZE_BITS,
2875 len >> F2FS_BLKSIZE_BITS, false);
2879 f2fs_i_size_write(dst, dst_max_i_size);
2880 else if (dst_osize != dst->i_size)
2881 f2fs_i_size_write(dst, dst_osize);
2883 f2fs_unlock_op(sbi);
2886 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2888 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2892 inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
2893 f2fs_mark_inode_dirty_sync(src, false);
2895 inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
2896 f2fs_mark_inode_dirty_sync(dst, false);
2898 f2fs_update_time(sbi, REQ_TIME);
2908 static int __f2fs_ioc_move_range(struct file *filp,
2909 struct f2fs_move_range *range)
2914 if (!(filp->f_mode & FMODE_READ) ||
2915 !(filp->f_mode & FMODE_WRITE))
2918 dst = fdget(range->dst_fd);
2922 if (!(dst.file->f_mode & FMODE_WRITE)) {
2927 err = mnt_want_write_file(filp);
2931 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2932 range->pos_out, range->len);
2934 mnt_drop_write_file(filp);
2940 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2942 struct f2fs_move_range range;
2944 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2947 return __f2fs_ioc_move_range(filp, &range);
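/*
 * Editor's note, illustrative only: a userspace sketch of
 * F2FS_IOC_MOVE_RANGE (src_fd/dst_fd are assumed open descriptors on the
 * same f2fs mount). All positions and the length must be block-aligned,
 * as enforced in f2fs_move_file_range() above.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd  = dst_fd,
 *		.pos_in  = 0,
 *		.pos_out = 0,
 *		.len     = 1 << 20,
 *	};
 *	int err = ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */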
2950 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2952 struct inode *inode = file_inode(filp);
2953 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2954 struct sit_info *sm = SIT_I(sbi);
2955 unsigned int start_segno = 0, end_segno = 0;
2956 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2957 struct f2fs_flush_device range;
2958 struct f2fs_gc_control gc_control = {
2959 .init_gc_type = FG_GC,
2960 .should_migrate_blocks = true,
2961 .err_gc_skipped = true,
2962 .nr_free_secs = 0 };
2965 if (!capable(CAP_SYS_ADMIN))
2968 if (f2fs_readonly(sbi->sb))
2971 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2974 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2978 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2979 __is_large_section(sbi)) {
2980 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2981 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2985 ret = mnt_want_write_file(filp);
2989 if (range.dev_num != 0)
2990 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2991 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2993 start_segno = sm->last_victim[FLUSH_DEVICE];
2994 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2995 start_segno = dev_start_segno;
2996 end_segno = min(start_segno + range.segments, dev_end_segno);
2998 while (start_segno < end_segno) {
2999 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3003 sm->last_victim[GC_CB] = end_segno + 1;
3004 sm->last_victim[GC_GREEDY] = end_segno + 1;
3005 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3007 gc_control.victim_segno = start_segno;
3008 stat_inc_gc_call_count(sbi, FOREGROUND);
3009 ret = f2fs_gc(sbi, &gc_control);
3017 mnt_drop_write_file(filp);
3021 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3023 struct inode *inode = file_inode(filp);
3024 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3026 /* Always advertise atomic-write support, matching the SQLite behavior expected on Android. */
3027 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3029 return put_user(sb_feature, (u32 __user *)arg);
3033 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3035 struct dquot *transfer_to[MAXQUOTAS] = {};
3036 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3037 struct super_block *sb = sbi->sb;
3040 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3041 if (IS_ERR(transfer_to[PRJQUOTA]))
3042 return PTR_ERR(transfer_to[PRJQUOTA]);
3044 err = __dquot_transfer(inode, transfer_to);
3046 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3047 dqput(transfer_to[PRJQUOTA]);
3051 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3053 struct f2fs_inode_info *fi = F2FS_I(inode);
3054 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3055 struct f2fs_inode *ri = NULL;
3059 if (!f2fs_sb_has_project_quota(sbi)) {
3060 if (projid != F2FS_DEF_PROJID)
3066 if (!f2fs_has_extra_attr(inode))
3069 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3071 if (projid_eq(kprojid, fi->i_projid))
3075 /* Is it quota file? Do not allow user to mess with it */
3076 if (IS_NOQUOTA(inode))
3079 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3082 err = f2fs_dquot_initialize(inode);
3087 err = f2fs_transfer_project_quota(inode, kprojid);
3091 fi->i_projid = kprojid;
3092 inode_set_ctime_current(inode);
3093 f2fs_mark_inode_dirty_sync(inode, true);
3095 f2fs_unlock_op(sbi);
3099 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3104 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3106 if (projid != F2FS_DEF_PROJID)
3112 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3114 struct inode *inode = d_inode(dentry);
3115 struct f2fs_inode_info *fi = F2FS_I(inode);
3116 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3118 if (IS_ENCRYPTED(inode))
3119 fsflags |= FS_ENCRYPT_FL;
3120 if (IS_VERITY(inode))
3121 fsflags |= FS_VERITY_FL;
3122 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3123 fsflags |= FS_INLINE_DATA_FL;
3124 if (is_inode_flag_set(inode, FI_PIN_FILE))
3125 fsflags |= FS_NOCOW_FL;
3127 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3129 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3130 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3135 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3136 struct dentry *dentry, struct fileattr *fa)
3138 struct inode *inode = d_inode(dentry);
3139 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3143 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3145 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3147 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3149 fsflags &= F2FS_SETTABLE_FS_FL;
3150 if (!fa->flags_valid)
3151 mask &= FS_COMMON_FL;
3153 iflags = f2fs_fsflags_to_iflags(fsflags);
3154 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3157 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3159 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3164 int f2fs_pin_file_control(struct inode *inode, bool inc)
3166 struct f2fs_inode_info *fi = F2FS_I(inode);
3167 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3169 /* Use i_gc_failures as a risk signal for a regular file. */
3171 f2fs_i_gc_failures_write(inode,
3172 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3174 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3175 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3176 __func__, inode->i_ino,
3177 fi->i_gc_failures[GC_FAILURE_PIN]);
3178 clear_inode_flag(inode, FI_PIN_FILE);
3184 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3186 struct inode *inode = file_inode(filp);
3190 if (get_user(pin, (__u32 __user *)arg))
3193 if (!S_ISREG(inode->i_mode))
3196 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3199 ret = mnt_want_write_file(filp);
3206 clear_inode_flag(inode, FI_PIN_FILE);
3207 f2fs_i_gc_failures_write(inode, 0);
3211 if (f2fs_should_update_outplace(inode, NULL)) {
3216 if (f2fs_pin_file_control(inode, false)) {
3221 ret = f2fs_convert_inline_inode(inode);
3225 if (!f2fs_disable_compressed_file(inode)) {
3230 set_inode_flag(inode, FI_PIN_FILE);
3231 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3233 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3235 inode_unlock(inode);
3236 mnt_drop_write_file(filp);
3240 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3242 struct inode *inode = file_inode(filp);
3245 if (is_inode_flag_set(inode, FI_PIN_FILE))
3246 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3247 return put_user(pin, (u32 __user *)arg);
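/*
 * Editor's note, illustrative only: pinning via F2FS_IOC_SET_PIN_FILE
 * takes a __u32 (nonzero to pin, zero to unpin). A pinned file is updated
 * in place and skipped by GC block migration; f2fs_pin_file_control()
 * above auto-unpins it once GC failures exceed gc_pin_file_threshold.
 *
 *	__u32 pin = 1;
 *	int err = ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 */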
3250 int f2fs_precache_extents(struct inode *inode)
3252 struct f2fs_inode_info *fi = F2FS_I(inode);
3253 struct f2fs_map_blocks map;
3254 pgoff_t m_next_extent;
3258 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3263 map.m_next_pgofs = NULL;
3264 map.m_next_extent = &m_next_extent;
3265 map.m_seg_type = NO_CHECK_TYPE;
3266 map.m_may_create = false;
3267 end = F2FS_BLK_ALIGN(i_size_read(inode));
3269 while (map.m_lblk < end) {
3270 map.m_len = end - map.m_lblk;
3272 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3273 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
3274 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3275 if (err || !map.m_len)
3278 map.m_lblk = m_next_extent;
3284 static int f2fs_ioc_precache_extents(struct file *filp)
3286 return f2fs_precache_extents(file_inode(filp));
3289 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3291 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3294 if (!capable(CAP_SYS_ADMIN))
3297 if (f2fs_readonly(sbi->sb))
3300 if (copy_from_user(&block_count, (void __user *)arg,
3301 sizeof(block_count)))
3304 return f2fs_resize_fs(filp, block_count);
3307 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3309 struct inode *inode = file_inode(filp);
3311 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3313 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3314 f2fs_warn(F2FS_I_SB(inode),
3315 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3320 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3323 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3325 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3328 return fsverity_ioctl_measure(filp, (void __user *)arg);
3331 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3333 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3336 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3339 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3341 struct inode *inode = file_inode(filp);
3342 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3347 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3351 f2fs_down_read(&sbi->sb_lock);
3352 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3353 ARRAY_SIZE(sbi->raw_super->volume_name),
3354 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3355 f2fs_up_read(&sbi->sb_lock);
3357 if (copy_to_user((char __user *)arg, vbuf,
3358 min(FSLABEL_MAX, count)))
3365 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3367 struct inode *inode = file_inode(filp);
3368 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3372 if (!capable(CAP_SYS_ADMIN))
3375 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3377 return PTR_ERR(vbuf);
3379 err = mnt_want_write_file(filp);
3383 f2fs_down_write(&sbi->sb_lock);
3385 memset(sbi->raw_super->volume_name, 0,
3386 sizeof(sbi->raw_super->volume_name));
3387 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3388 sbi->raw_super->volume_name,
3389 ARRAY_SIZE(sbi->raw_super->volume_name));
3391 err = f2fs_commit_super(sbi, false);
3393 f2fs_up_write(&sbi->sb_lock);
3395 mnt_drop_write_file(filp);
3401 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
3403 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3406 if (!f2fs_compressed_file(inode))
3409 *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3414 static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
3416 struct inode *inode = file_inode(filp);
3420 ret = f2fs_get_compress_blocks(inode, &blocks);
3424 return put_user(blocks, (u64 __user *)arg);
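/*
 * Editor's note on the compress-blocks ioctls below:
 * F2FS_IOC_GET_COMPRESS_BLOCKS reports i_compr_blocks;
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS returns the space saved by compression
 * to the free pool and sets FI_COMPRESS_RELEASED, after which writes are
 * rejected; F2FS_IOC_RESERVE_COMPRESS_BLOCKS re-reserves those blocks so
 * the file becomes writable again. Both pass the released/reserved block
 * count back through the __u64 argument.
 */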
3427 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3429 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3430 unsigned int released_blocks = 0;
3431 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3435 for (i = 0; i < count; i++) {
3436 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3437 dn->ofs_in_node + i);
3439 if (!__is_valid_data_blkaddr(blkaddr))
3441 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3442 DATA_GENERIC_ENHANCE))) {
3443 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3444 return -EFSCORRUPTED;
3449 int compr_blocks = 0;
3451 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3452 blkaddr = f2fs_data_blkaddr(dn);
3455 if (blkaddr == COMPRESS_ADDR)
3457 dn->ofs_in_node += cluster_size;
3461 if (__is_valid_data_blkaddr(blkaddr))
3464 if (blkaddr != NEW_ADDR)
3467 f2fs_set_data_blkaddr(dn, NULL_ADDR);
3470 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3471 dec_valid_block_count(sbi, dn->inode,
3472 cluster_size - compr_blocks);
3474 released_blocks += cluster_size - compr_blocks;
3476 count -= cluster_size;
3479 return released_blocks;
3482 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3484 struct inode *inode = file_inode(filp);
3485 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3486 pgoff_t page_idx = 0, last_idx;
3487 unsigned int released_blocks = 0;
3491 if (!f2fs_sb_has_compression(sbi))
3494 if (!f2fs_compressed_file(inode))
3497 if (f2fs_readonly(sbi->sb))
3500 ret = mnt_want_write_file(filp);
3504 f2fs_balance_fs(sbi, true);
3508 writecount = atomic_read(&inode->i_writecount);
3509 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3510 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3515 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3520 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3524 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3529 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3530 inode_set_ctime_current(inode);
3531 f2fs_mark_inode_dirty_sync(inode, true);
3533 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3534 filemap_invalidate_lock(inode->i_mapping);
3536 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3538 while (page_idx < last_idx) {
3539 struct dnode_of_data dn;
3540 pgoff_t end_offset, count;
3542 set_new_dnode(&dn, inode, NULL, NULL, 0);
3543 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3545 if (ret == -ENOENT) {
3546 page_idx = f2fs_get_next_page_offset(&dn,
3554 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3555 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3556 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3558 ret = release_compress_blocks(&dn, count);
3560 f2fs_put_dnode(&dn);
3566 released_blocks += ret;
3569 filemap_invalidate_unlock(inode->i_mapping);
3570 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3572 inode_unlock(inode);
3574 mnt_drop_write_file(filp);
3577 ret = put_user(released_blocks, (u64 __user *)arg);
3578 } else if (released_blocks &&
3579 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3580 set_sbi_flag(sbi, SBI_NEED_FSCK);
3581 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3582 "iblocks=%llu, released=%u, compr_blocks=%u, "
3584 __func__, inode->i_ino, inode->i_blocks,
3586 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3592 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
3593 unsigned int *reserved_blocks)
3595 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3596 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3600 for (i = 0; i < count; i++) {
3601 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3602 dn->ofs_in_node + i);
3604 if (!__is_valid_data_blkaddr(blkaddr))
3606 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3607 DATA_GENERIC_ENHANCE))) {
3608 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3609 return -EFSCORRUPTED;
3614 int compr_blocks = 0;
3618 for (i = 0; i < cluster_size; i++) {
3619 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3620 dn->ofs_in_node + i);
3623 if (blkaddr != COMPRESS_ADDR) {
3624 dn->ofs_in_node += cluster_size;
3631 * the compressed cluster was not released because
3632 * release_compress_blocks() failed on it, so NEW_ADDR
3633 * is a possible case.
3635 if (blkaddr == NEW_ADDR ||
3636 __is_valid_data_blkaddr(blkaddr)) {
3642 reserved = cluster_size - compr_blocks;
3644 /* for the case where all blocks in the cluster were reserved */
3648 ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
3652 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3653 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
3654 f2fs_set_data_blkaddr(dn, NEW_ADDR);
3657 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3659 *reserved_blocks += reserved;
3661 count -= cluster_size;
3667 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3669 struct inode *inode = file_inode(filp);
3670 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3671 pgoff_t page_idx = 0, last_idx;
3672 unsigned int reserved_blocks = 0;
3675 if (!f2fs_sb_has_compression(sbi))
3678 if (!f2fs_compressed_file(inode))
3681 if (f2fs_readonly(sbi->sb))
3684 ret = mnt_want_write_file(filp);
3688 f2fs_balance_fs(sbi, true);
3692 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3697 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3700 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3701 filemap_invalidate_lock(inode->i_mapping);
3703 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3705 while (page_idx < last_idx) {
3706 struct dnode_of_data dn;
3707 pgoff_t end_offset, count;
3709 set_new_dnode(&dn, inode, NULL, NULL, 0);
3710 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3712 if (ret == -ENOENT) {
3713 page_idx = f2fs_get_next_page_offset(&dn,
3721 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3722 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3723 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3725 ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
3727 f2fs_put_dnode(&dn);
3735 filemap_invalidate_unlock(inode->i_mapping);
3736 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3739 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3740 inode_set_ctime_current(inode);
3741 f2fs_mark_inode_dirty_sync(inode, true);
3744 inode_unlock(inode);
3745 mnt_drop_write_file(filp);
3748 ret = put_user(reserved_blocks, (u64 __user *)arg);
3749 } else if (reserved_blocks &&
3750 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3751 set_sbi_flag(sbi, SBI_NEED_FSCK);
3752 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3753 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3755 __func__, inode->i_ino, inode->i_blocks,
3757 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3763 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3764 pgoff_t off, block_t block, block_t len, u32 flags)
3766 sector_t sector = SECTOR_FROM_BLOCK(block);
3767 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3770 if (flags & F2FS_TRIM_FILE_DISCARD) {
3771 if (bdev_max_secure_erase_sectors(bdev))
3772 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3775 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3779 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3780 if (IS_ENCRYPTED(inode))
3781 ret = fscrypt_zeroout_range(inode, off, block, len);
3783 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
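/*
 * Editor's note, illustrative only: F2FS_IOC_SEC_TRIM_FILE takes a struct
 * f2fs_sectrim_range { __u64 start; __u64 len; __u64 flags; }, where flags
 * combines F2FS_TRIM_FILE_DISCARD and/or F2FS_TRIM_FILE_ZEROOUT.
 * f2fs_secure_erase() above prefers blkdev_issue_secure_erase() when the
 * device supports it, falls back to a plain discard, and optionally zeroes
 * the range afterwards.
 *
 *	struct f2fs_sectrim_range sr = {
 *		.start = 0,
 *		.len   = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	int err = ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &sr);
 */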
3790 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3792 struct inode *inode = file_inode(filp);
3793 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3794 struct address_space *mapping = inode->i_mapping;
3795 struct block_device *prev_bdev = NULL;
3796 struct f2fs_sectrim_range range;
3797 pgoff_t index, pg_end, prev_index = 0;
3798 block_t prev_block = 0, len = 0;
3800 bool to_end = false;
3803 if (!(filp->f_mode & FMODE_WRITE))
3806 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3810 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3811 !S_ISREG(inode->i_mode))
3814 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3815 !f2fs_hw_support_discard(sbi)) ||
3816 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3817 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3820 file_start_write(filp);
3823 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3824 range.start >= inode->i_size) {
3832 if (inode->i_size - range.start > range.len) {
3833 end_addr = range.start + range.len;
3835 end_addr = range.len == (u64)-1 ?
3836 sbi->sb->s_maxbytes : inode->i_size;
3840 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3841 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3846 index = F2FS_BYTES_TO_BLK(range.start);
3847 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3849 ret = f2fs_convert_inline_inode(inode);
3853 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3854 filemap_invalidate_lock(mapping);
3856 ret = filemap_write_and_wait_range(mapping, range.start,
3857 to_end ? LLONG_MAX : end_addr - 1);
3861 truncate_inode_pages_range(mapping, range.start,
3862 to_end ? -1 : end_addr - 1);
3864 while (index < pg_end) {
3865 struct dnode_of_data dn;
3866 pgoff_t end_offset, count;
3869 set_new_dnode(&dn, inode, NULL, NULL, 0);
3870 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3872 if (ret == -ENOENT) {
3873 index = f2fs_get_next_page_offset(&dn, index);
3879 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3880 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3881 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3882 struct block_device *cur_bdev;
3883 block_t blkaddr = f2fs_data_blkaddr(&dn);
3885 if (!__is_valid_data_blkaddr(blkaddr))
3888 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3889 DATA_GENERIC_ENHANCE)) {
3890 ret = -EFSCORRUPTED;
3891 f2fs_put_dnode(&dn);
3892 f2fs_handle_error(sbi,
3893 ERROR_INVALID_BLKADDR);
3897 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3898 if (f2fs_is_multi_device(sbi)) {
3899 int di = f2fs_target_device_index(sbi, blkaddr);
3901 blkaddr -= FDEV(di).start_blk;
3905 if (prev_bdev == cur_bdev &&
3906 index == prev_index + len &&
3907 blkaddr == prev_block + len) {
3910 ret = f2fs_secure_erase(prev_bdev,
3911 inode, prev_index, prev_block,
3914 f2fs_put_dnode(&dn);
3923 prev_bdev = cur_bdev;
3925 prev_block = blkaddr;
3930 f2fs_put_dnode(&dn);
3932 if (fatal_signal_pending(current)) {
3940 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3941 prev_block, len, range.flags);
3943 filemap_invalidate_unlock(mapping);
3944 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3946 inode_unlock(inode);
3947 file_end_write(filp);
3952 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3954 struct inode *inode = file_inode(filp);
3955 struct f2fs_comp_option option;
3957 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3960 inode_lock_shared(inode);
3962 if (!f2fs_compressed_file(inode)) {
3963 inode_unlock_shared(inode);
3967 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3968 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3970 inode_unlock_shared(inode);
3972 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3979 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3981 struct inode *inode = file_inode(filp);
3982 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3983 struct f2fs_comp_option option;
3986 if (!f2fs_sb_has_compression(sbi))
3989 if (!(filp->f_mode & FMODE_WRITE))
3992 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3996 if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3997 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3998 option.algorithm >= COMPRESS_MAX)
4001 file_start_write(filp);
4004 f2fs_down_write(&F2FS_I(inode)->i_sem);
4005 if (!f2fs_compressed_file(inode)) {
4010 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4015 if (F2FS_HAS_BLOCKS(inode)) {
4020 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4021 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4022 F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
4023 /* Set default level */
4024 if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
4025 F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
4027 F2FS_I(inode)->i_compress_level = 0;
4028 /* Adjust mount option level */
4029 if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
4030 F2FS_OPTION(sbi).compress_level)
4031 F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
4032 f2fs_mark_inode_dirty_sync(inode, true);
4034 if (!f2fs_is_compress_backend_ready(inode))
4035 f2fs_warn(sbi, "compression algorithm is successfully set, "
4036 "but current kernel doesn't support this algorithm.");
4038 f2fs_up_write(&F2FS_I(inode)->i_sem);
4039 inode_unlock(inode);
4040 file_end_write(filp);
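/*
 * Editor's note, illustrative only: F2FS_IOC_SET_COMPRESS_OPTION takes a
 * struct f2fs_comp_option { __u8 algorithm; __u8 log_cluster_size; }. It
 * only succeeds on a compressed file that has no blocks allocated yet, as
 * checked via F2FS_HAS_BLOCKS() above.
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm        = 2,	// COMPRESS_ZSTD in fs/f2fs/f2fs.h
 *		.log_cluster_size = 2,	// 4-block clusters
 *	};
 *	int err = ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */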
4045 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4047 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
4048 struct address_space *mapping = inode->i_mapping;
4050 pgoff_t redirty_idx = page_idx;
4051 int i, page_len = 0, ret = 0;
4053 page_cache_ra_unbounded(&ractl, len, 0);
4055 for (i = 0; i < len; i++, page_idx++) {
4056 page = read_cache_page(mapping, page_idx, NULL, NULL);
4058 ret = PTR_ERR(page);
4064 for (i = 0; i < page_len; i++, redirty_idx++) {
4065 page = find_lock_page(mapping, redirty_idx);
4067 /* This will never fail, since the page was pinned above */
4068 f2fs_bug_on(F2FS_I_SB(inode), !page);
4070 set_page_dirty(page);
4071 set_page_private_gcing(page);
4072 f2fs_put_page(page, 1);
4073 f2fs_put_page(page, 0);
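/*
 * Editor's note: redirty_blocks() is the engine of the (de)compress ioctls
 * below. It pulls one cluster's pages into the page cache, then locks and
 * redirties each page (tagging it with set_page_private_gcing()) so the
 * next writeback rewrites the cluster in the currently requested format.
 */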
4079 static int f2fs_ioc_decompress_file(struct file *filp)
4081 struct inode *inode = file_inode(filp);
4082 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4083 struct f2fs_inode_info *fi = F2FS_I(inode);
4084 pgoff_t page_idx = 0, last_idx;
4085 unsigned int blk_per_seg = sbi->blocks_per_seg;
4086 int cluster_size = fi->i_cluster_size;
4089 if (!f2fs_sb_has_compression(sbi) ||
4090 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4093 if (!(filp->f_mode & FMODE_WRITE))
4096 if (!f2fs_compressed_file(inode))
4099 f2fs_balance_fs(sbi, true);
4101 file_start_write(filp);
4104 if (!f2fs_is_compress_backend_ready(inode)) {
4109 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4114 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4118 if (!atomic_read(&fi->i_compr_blocks))
4121 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4123 count = last_idx - page_idx;
4124 while (count && count >= cluster_size) {
4125 ret = redirty_blocks(inode, page_idx, cluster_size);
4129 if (get_dirty_pages(inode) >= blk_per_seg) {
4130 ret = filemap_fdatawrite(inode->i_mapping);
4135 count -= cluster_size;
4136 page_idx += cluster_size;
4139 if (fatal_signal_pending(current)) {
4146 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4150 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4153 inode_unlock(inode);
4154 file_end_write(filp);
4159 static int f2fs_ioc_compress_file(struct file *filp)
4161 struct inode *inode = file_inode(filp);
4162 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4163 pgoff_t page_idx = 0, last_idx;
4164 unsigned int blk_per_seg = sbi->blocks_per_seg;
4165 int cluster_size = F2FS_I(inode)->i_cluster_size;
4168 if (!f2fs_sb_has_compression(sbi) ||
4169 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4172 if (!(filp->f_mode & FMODE_WRITE))
4175 if (!f2fs_compressed_file(inode))
4178 f2fs_balance_fs(sbi, true);
4180 file_start_write(filp);
4183 if (!f2fs_is_compress_backend_ready(inode)) {
4188 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4193 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4197 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4199 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4201 count = last_idx - page_idx;
4202 while (count && count >= cluster_size) {
4203 ret = redirty_blocks(inode, page_idx, cluster_size);
4207 if (get_dirty_pages(inode) >= blk_per_seg) {
4208 ret = filemap_fdatawrite(inode->i_mapping);
4213 count -= cluster_size;
4214 page_idx += cluster_size;
4217 if (fatal_signal_pending(current)) {
4224 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4227 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4230 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4233 inode_unlock(inode);
4234 file_end_write(filp);
4239 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4242 case FS_IOC_GETVERSION:
4243 return f2fs_ioc_getversion(filp, arg);
4244 case F2FS_IOC_START_ATOMIC_WRITE:
4245 return f2fs_ioc_start_atomic_write(filp, false);
4246 case F2FS_IOC_START_ATOMIC_REPLACE:
4247 return f2fs_ioc_start_atomic_write(filp, true);
4248 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4249 return f2fs_ioc_commit_atomic_write(filp);
4250 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4251 return f2fs_ioc_abort_atomic_write(filp);
4252 case F2FS_IOC_START_VOLATILE_WRITE:
4253 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4255 case F2FS_IOC_SHUTDOWN:
4256 return f2fs_ioc_shutdown(filp, arg);
4258 return f2fs_ioc_fitrim(filp, arg);
4259 case FS_IOC_SET_ENCRYPTION_POLICY:
4260 return f2fs_ioc_set_encryption_policy(filp, arg);
4261 case FS_IOC_GET_ENCRYPTION_POLICY:
4262 return f2fs_ioc_get_encryption_policy(filp, arg);
4263 case FS_IOC_GET_ENCRYPTION_PWSALT:
4264 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4265 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4266 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4267 case FS_IOC_ADD_ENCRYPTION_KEY:
4268 return f2fs_ioc_add_encryption_key(filp, arg);
4269 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4270 return f2fs_ioc_remove_encryption_key(filp, arg);
4271 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4272 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4273 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4274 return f2fs_ioc_get_encryption_key_status(filp, arg);
4275 case FS_IOC_GET_ENCRYPTION_NONCE:
4276 return f2fs_ioc_get_encryption_nonce(filp, arg);
4277 case F2FS_IOC_GARBAGE_COLLECT:
4278 return f2fs_ioc_gc(filp, arg);
4279 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4280 return f2fs_ioc_gc_range(filp, arg);
4281 case F2FS_IOC_WRITE_CHECKPOINT:
4282 return f2fs_ioc_write_checkpoint(filp);
4283 case F2FS_IOC_DEFRAGMENT:
4284 return f2fs_ioc_defragment(filp, arg);
4285 case F2FS_IOC_MOVE_RANGE:
4286 return f2fs_ioc_move_range(filp, arg);
4287 case F2FS_IOC_FLUSH_DEVICE:
4288 return f2fs_ioc_flush_device(filp, arg);
4289 case F2FS_IOC_GET_FEATURES:
4290 return f2fs_ioc_get_features(filp, arg);
4291 case F2FS_IOC_GET_PIN_FILE:
4292 return f2fs_ioc_get_pin_file(filp, arg);
4293 case F2FS_IOC_SET_PIN_FILE:
4294 return f2fs_ioc_set_pin_file(filp, arg);
4295 case F2FS_IOC_PRECACHE_EXTENTS:
4296 return f2fs_ioc_precache_extents(filp);
4297 case F2FS_IOC_RESIZE_FS:
4298 return f2fs_ioc_resize_fs(filp, arg);
4299 case FS_IOC_ENABLE_VERITY:
4300 return f2fs_ioc_enable_verity(filp, arg);
4301 case FS_IOC_MEASURE_VERITY:
4302 return f2fs_ioc_measure_verity(filp, arg);
4303 case FS_IOC_READ_VERITY_METADATA:
4304 return f2fs_ioc_read_verity_metadata(filp, arg);
4305 case FS_IOC_GETFSLABEL:
4306 return f2fs_ioc_getfslabel(filp, arg);
4307 case FS_IOC_SETFSLABEL:
4308 return f2fs_ioc_setfslabel(filp, arg);
4309 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4310 return f2fs_ioc_get_compress_blocks(filp, arg);
4311 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4312 return f2fs_release_compress_blocks(filp, arg);
4313 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4314 return f2fs_reserve_compress_blocks(filp, arg);
4315 case F2FS_IOC_SEC_TRIM_FILE:
4316 return f2fs_sec_trim_file(filp, arg);
4317 case F2FS_IOC_GET_COMPRESS_OPTION:
4318 return f2fs_ioc_get_compress_option(filp, arg);
4319 case F2FS_IOC_SET_COMPRESS_OPTION:
4320 return f2fs_ioc_set_compress_option(filp, arg);
4321 case F2FS_IOC_DECOMPRESS_FILE:
4322 return f2fs_ioc_decompress_file(filp);
4323 case F2FS_IOC_COMPRESS_FILE:
4324 return f2fs_ioc_compress_file(filp);
4330 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4332 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4334 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4337 return __f2fs_ioctl(filp, cmd, arg);
4341 * Return %true if the given read or write request should use direct I/O, or
4342 * %false if it should use buffered I/O.
4344 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4345 struct iov_iter *iter)
4349 if (!(iocb->ki_flags & IOCB_DIRECT))
4352 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4356 * Direct I/O not aligned to the disk's logical_block_size will be
4357 * attempted, but will fail with -EINVAL.
4359 * f2fs additionally requires that direct I/O be aligned to the
4360 * filesystem block size, which is often a stricter requirement.
4361 * However, f2fs traditionally falls back to buffered I/O on requests
4362 * that are logical_block_size-aligned but not fs-block aligned.
4364 * The below logic implements this behavior.
4366 align = iocb->ki_pos | iov_iter_alignment(iter);
4367 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4368 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
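/*
 * Editor's example of the rule above: on a 4KiB-block filesystem backed by
 * a 512-byte logical-block device, a DIO request at offset 512 is
 * logical-block aligned but not fs-block aligned, so it falls back to
 * buffered I/O instead of failing with -EINVAL.
 */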
4374 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4377 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4379 dec_page_count(sbi, F2FS_DIO_READ);
4382 f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4386 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4387 .end_io = f2fs_dio_read_end_io,
4390 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4392 struct file *file = iocb->ki_filp;
4393 struct inode *inode = file_inode(file);
4394 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4395 struct f2fs_inode_info *fi = F2FS_I(inode);
4396 const loff_t pos = iocb->ki_pos;
4397 const size_t count = iov_iter_count(to);
4398 struct iomap_dio *dio;
4402 return 0; /* skip atime update */
4404 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4406 if (iocb->ki_flags & IOCB_NOWAIT) {
4407 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4412 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4416 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4417 * the higher-level function iomap_dio_rw() in order to ensure that the
4418 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4420 inc_page_count(sbi, F2FS_DIO_READ);
4421 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4422 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4423 if (IS_ERR_OR_NULL(dio)) {
4424 ret = PTR_ERR_OR_ZERO(dio);
4425 if (ret != -EIOCBQUEUED)
4426 dec_page_count(sbi, F2FS_DIO_READ);
4428 ret = iomap_dio_complete(dio);
4431 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4433 file_accessed(file);
4435 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4439 static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
4442 struct inode *inode = file_inode(file);
4445 buf = f2fs_getname(F2FS_I_SB(inode));
4448 path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
4452 trace_f2fs_datawrite_start(inode, pos, count,
4453 current->pid, path, current->comm);
4455 trace_f2fs_dataread_start(inode, pos, count,
4456 current->pid, path, current->comm);
4461 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4463 struct inode *inode = file_inode(iocb->ki_filp);
4464 const loff_t pos = iocb->ki_pos;
4467 if (!f2fs_is_compress_backend_ready(inode))
4470 if (trace_f2fs_dataread_start_enabled())
4471 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4472 iov_iter_count(to), READ);
4474 if (f2fs_should_use_dio(inode, iocb, to)) {
4475 ret = f2fs_dio_read_iter(iocb, to);
4477 ret = filemap_read(iocb, to, 0);
4479 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4480 APP_BUFFERED_READ_IO, ret);
4482 if (trace_f2fs_dataread_end_enabled())
4483 trace_f2fs_dataread_end(inode, pos, ret);
4487 static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
4488 struct pipe_inode_info *pipe,
4489 size_t len, unsigned int flags)
4491 struct inode *inode = file_inode(in);
4492 const loff_t pos = *ppos;
4495 if (!f2fs_is_compress_backend_ready(inode))
4498 if (trace_f2fs_dataread_start_enabled())
4499 f2fs_trace_rw_file_path(in, pos, len, READ);
4501 ret = filemap_splice_read(in, ppos, pipe, len, flags);
4503 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4504 APP_BUFFERED_READ_IO, ret);
4506 if (trace_f2fs_dataread_end_enabled())
4507 trace_f2fs_dataread_end(inode, pos, ret);
4511 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4513 struct file *file = iocb->ki_filp;
4514 struct inode *inode = file_inode(file);
4518 if (IS_IMMUTABLE(inode))
4521 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4524 count = generic_write_checks(iocb, from);
4528 err = file_modified(file);
4535 * Preallocate blocks for a write request, if it is possible and helpful to do
4536 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4537 * blocks were preallocated, or a negative errno value if something went
4538 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4539 * requested blocks (not just some of them) have been allocated.
4541 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4544 struct inode *inode = file_inode(iocb->ki_filp);
4545 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4546 const loff_t pos = iocb->ki_pos;
4547 const size_t count = iov_iter_count(iter);
4548 struct f2fs_map_blocks map = {};
4552 /* If it will be an out-of-place direct write, don't bother. */
4553 if (dio && f2fs_lfs_mode(sbi))
4556 * Don't preallocate into holes: per DIO_SKIP_HOLES, DIO that meets a
4557 * hole falls back to buffered IO anyway.
4559 if (dio && i_size_read(inode) &&
4560 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4563 /* No-wait I/O can't allocate blocks. */
4564 if (iocb->ki_flags & IOCB_NOWAIT)
4567 /* If it will be a short write, don't bother. */
4568 if (fault_in_iov_iter_readable(iter, count))
4571 if (f2fs_has_inline_data(inode)) {
4572 /* If the data will fit inline, don't bother. */
4573 if (pos + count <= MAX_INLINE_DATA(inode))
4575 ret = f2fs_convert_inline_inode(inode);
4580 /* Do not preallocate blocks that would only be partially written within 4KB. */
4581 map.m_lblk = F2FS_BLK_ALIGN(pos);
4582 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4583 if (map.m_len > map.m_lblk)
4584 map.m_len -= map.m_lblk;
4588 map.m_may_create = true;
4590 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4591 flag = F2FS_GET_BLOCK_PRE_DIO;
4593 map.m_seg_type = NO_CHECK_TYPE;
4594 flag = F2FS_GET_BLOCK_PRE_AIO;
4597 ret = f2fs_map_blocks(inode, &map, flag);
4598 /* On -ENOSPC or -EDQUOT, it is still fine to report the number of allocated blocks. */
4599 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4602 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4606 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4607 struct iov_iter *from)
4609 struct file *file = iocb->ki_filp;
4610 struct inode *inode = file_inode(file);
4613 if (iocb->ki_flags & IOCB_NOWAIT)
4616 ret = generic_perform_write(iocb, from);
4619 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4620 APP_BUFFERED_IO, ret);
4625 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4628 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4630 dec_page_count(sbi, F2FS_DIO_WRITE);
4633 f2fs_update_time(sbi, REQ_TIME);
4634 f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
4638 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4639 .end_io = f2fs_dio_write_end_io,
4642 static void f2fs_flush_buffered_write(struct address_space *mapping,
4643 loff_t start_pos, loff_t end_pos)
4647 ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
4650 invalidate_mapping_pages(mapping,
4651 start_pos >> PAGE_SHIFT,
4652 end_pos >> PAGE_SHIFT);
4655 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4656 bool *may_need_sync)
4658 struct file *file = iocb->ki_filp;
4659 struct inode *inode = file_inode(file);
4660 struct f2fs_inode_info *fi = F2FS_I(inode);
4661 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4662 const bool do_opu = f2fs_lfs_mode(sbi);
4663 const loff_t pos = iocb->ki_pos;
4664 const ssize_t count = iov_iter_count(from);
4665 unsigned int dio_flags;
4666 struct iomap_dio *dio;
4669 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4671 if (iocb->ki_flags & IOCB_NOWAIT) {
4672 /* f2fs_convert_inline_inode() and block allocation can block */
4673 if (f2fs_has_inline_data(inode) ||
4674 !f2fs_overwrite_io(inode, pos, count)) {
4679 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4683 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4684 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4689 ret = f2fs_convert_inline_inode(inode);
4693 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4695 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4699 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4700 * the higher-level function iomap_dio_rw() in order to ensure that the
4701 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4703 inc_page_count(sbi, F2FS_DIO_WRITE);
4705 if (pos + count > inode->i_size)
4706 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4707 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4708 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4709 if (IS_ERR_OR_NULL(dio)) {
4710 ret = PTR_ERR_OR_ZERO(dio);
4711 if (ret == -ENOTBLK)
4713 if (ret != -EIOCBQUEUED)
4714 dec_page_count(sbi, F2FS_DIO_WRITE);
4716 ret = iomap_dio_complete(dio);
4720 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4721 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4725 if (pos + ret > inode->i_size)
4726 f2fs_i_size_write(inode, pos + ret);
4728 set_inode_flag(inode, FI_UPDATE_WRITE);
4730 if (iov_iter_count(from)) {
4732 loff_t bufio_start_pos = iocb->ki_pos;
4735 * The direct write was partial, so we need to fall back to a
4736 * buffered write for the remainder.
4739 ret2 = f2fs_buffered_write_iter(iocb, from);
4740 if (iov_iter_count(from))
4741 f2fs_write_failed(inode, iocb->ki_pos);
4746 * Ensure that the pagecache pages are written to disk and
4747 * invalidated to preserve the expected O_DIRECT semantics.
4750 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4754 f2fs_flush_buffered_write(file->f_mapping,
4759 /* iomap_dio_rw() already handled the generic_write_sync(). */
4760 *may_need_sync = false;
4763 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4767 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4769 struct inode *inode = file_inode(iocb->ki_filp);
4770 const loff_t orig_pos = iocb->ki_pos;
4771 const size_t orig_count = iov_iter_count(from);
4774 bool may_need_sync = true;
4778 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4783 if (!f2fs_is_compress_backend_ready(inode)) {
4788 if (iocb->ki_flags & IOCB_NOWAIT) {
4789 if (!inode_trylock(inode)) {
4797 ret = f2fs_write_checks(iocb, from);
4801 /* Determine whether we will do a direct write or a buffered write. */
4802 dio = f2fs_should_use_dio(inode, iocb, from);
4804 /* Possibly preallocate the blocks for the write. */
4805 target_size = iocb->ki_pos + iov_iter_count(from);
4806 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4807 if (preallocated < 0) {
4810 if (trace_f2fs_datawrite_start_enabled())
4811 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4814 /* Do the actual write. */
4816 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4817 f2fs_buffered_write_iter(iocb, from);
4819 if (trace_f2fs_datawrite_end_enabled())
4820 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4823 /* Don't leave any preallocated blocks around past i_size. */
4824 if (preallocated && i_size_read(inode) < target_size) {
4825 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4826 filemap_invalidate_lock(inode->i_mapping);
4827 if (!f2fs_truncate(inode))
4828 file_dont_truncate(inode);
4829 filemap_invalidate_unlock(inode->i_mapping);
4830 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4832 file_dont_truncate(inode);
4835 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4837 inode_unlock(inode);
4839 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4841 if (ret > 0 && may_need_sync)
4842 ret = generic_write_sync(iocb, ret);
4844 /* If buffered IO was forced, flush and drop the data from
4845 * the page cache to preserve O_DIRECT semantics
4847 if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
4848 f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
4850 orig_pos + ret - 1);
4855 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4858 struct address_space *mapping;
4859 struct backing_dev_info *bdi;
4860 struct inode *inode = file_inode(filp);
4863 if (advice == POSIX_FADV_SEQUENTIAL) {
4864 if (S_ISFIFO(inode->i_mode))
4867 mapping = filp->f_mapping;
4868 if (!mapping || len < 0)
4871 bdi = inode_to_bdi(mapping->host);
4872 filp->f_ra.ra_pages = bdi->ra_pages *
4873 F2FS_I_SB(inode)->seq_file_ra_mul;
4874 spin_lock(&filp->f_lock);
4875 filp->f_mode &= ~FMODE_RANDOM;
4876 spin_unlock(&filp->f_lock);
4878 } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
4879 /* Load extent cache at the first readahead. */
4880 f2fs_precache_extents(inode);
4883 err = generic_fadvise(filp, offset, len, advice);
4884 if (!err && advice == POSIX_FADV_DONTNEED &&
4885 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4886 f2fs_compressed_file(inode))
4887 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4892 #ifdef CONFIG_COMPAT
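/*
 * Editor's note: the compat handlers below exist because struct
 * f2fs_gc_range and struct f2fs_move_range contain __u64 fields whose
 * alignment, and hence the struct size encoded in the ioctl number,
 * differs between 32-bit and 64-bit ABIs; the fields are therefore
 * copied in field by field.
 */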
4893 struct compat_f2fs_gc_range {
4898 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4899 struct compat_f2fs_gc_range)
4901 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4903 struct compat_f2fs_gc_range __user *urange;
4904 struct f2fs_gc_range range;
4907 urange = compat_ptr(arg);
4908 err = get_user(range.sync, &urange->sync);
4909 err |= get_user(range.start, &urange->start);
4910 err |= get_user(range.len, &urange->len);
4914 return __f2fs_ioc_gc_range(file, &range);
4917 struct compat_f2fs_move_range {
4923 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4924 struct compat_f2fs_move_range)
4926 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4928 struct compat_f2fs_move_range __user *urange;
4929 struct f2fs_move_range range;
4932 urange = compat_ptr(arg);
4933 err = get_user(range.dst_fd, &urange->dst_fd);
4934 err |= get_user(range.pos_in, &urange->pos_in);
4935 err |= get_user(range.pos_out, &urange->pos_out);
4936 err |= get_user(range.len, &urange->len);
4940 return __f2fs_ioc_move_range(file, &range);
4943 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4945 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4947 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4951 case FS_IOC32_GETVERSION:
4952 cmd = FS_IOC_GETVERSION;
4954 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4955 return f2fs_compat_ioc_gc_range(file, arg);
4956 case F2FS_IOC32_MOVE_RANGE:
4957 return f2fs_compat_ioc_move_range(file, arg);
4958 case F2FS_IOC_START_ATOMIC_WRITE:
4959 case F2FS_IOC_START_ATOMIC_REPLACE:
4960 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4961 case F2FS_IOC_START_VOLATILE_WRITE:
4962 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4963 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4964 case F2FS_IOC_SHUTDOWN:
4966 case FS_IOC_SET_ENCRYPTION_POLICY:
4967 case FS_IOC_GET_ENCRYPTION_PWSALT:
4968 case FS_IOC_GET_ENCRYPTION_POLICY:
4969 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4970 case FS_IOC_ADD_ENCRYPTION_KEY:
4971 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4972 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4973 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4974 case FS_IOC_GET_ENCRYPTION_NONCE:
4975 case F2FS_IOC_GARBAGE_COLLECT:
4976 case F2FS_IOC_WRITE_CHECKPOINT:
4977 case F2FS_IOC_DEFRAGMENT:
4978 case F2FS_IOC_FLUSH_DEVICE:
4979 case F2FS_IOC_GET_FEATURES:
4980 case F2FS_IOC_GET_PIN_FILE:
4981 case F2FS_IOC_SET_PIN_FILE:
4982 case F2FS_IOC_PRECACHE_EXTENTS:
4983 case F2FS_IOC_RESIZE_FS:
4984 case FS_IOC_ENABLE_VERITY:
4985 case FS_IOC_MEASURE_VERITY:
4986 case FS_IOC_READ_VERITY_METADATA:
4987 case FS_IOC_GETFSLABEL:
4988 case FS_IOC_SETFSLABEL:
4989 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4990 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4991 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4992 case F2FS_IOC_SEC_TRIM_FILE:
4993 case F2FS_IOC_GET_COMPRESS_OPTION:
4994 case F2FS_IOC_SET_COMPRESS_OPTION:
4995 case F2FS_IOC_DECOMPRESS_FILE:
4996 case F2FS_IOC_COMPRESS_FILE:
4999 return -ENOIOCTLCMD;
5001 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5005 const struct file_operations f2fs_file_operations = {
5006 .llseek = f2fs_llseek,
5007 .read_iter = f2fs_file_read_iter,
5008 .write_iter = f2fs_file_write_iter,
5009 .iopoll = iocb_bio_iopoll,
5010 .open = f2fs_file_open,
5011 .release = f2fs_release_file,
5012 .mmap = f2fs_file_mmap,
5013 .flush = f2fs_file_flush,
5014 .fsync = f2fs_sync_file,
5015 .fallocate = f2fs_fallocate,
5016 .unlocked_ioctl = f2fs_ioctl,
5017 #ifdef CONFIG_COMPAT
5018 .compat_ioctl = f2fs_compat_ioctl,
5020 .splice_read = f2fs_file_splice_read,
5021 .splice_write = iter_file_splice_write,
5022 .fadvise = f2fs_file_fadvise,