// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

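/*
 * f2fs_file_mmap() below installs these vm_ops: read faults are served by
 * filemap_fault() under i_mmap_sem, while the first write to a shared
 * mapping goes through f2fs_vm_page_mkwrite(), which allocates the backing
 * block and zeroes the part of the page beyond EOF before dirtying it.
 */
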
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

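/*
 * The cascade above reports only the first matching reason; any value other
 * than CP_NO_NEEDED makes f2fs_do_sync_file() fall back from roll-forward
 * fsync to a full checkpoint via f2fs_sync_fs().
 */
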
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to check for pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * for the OPU case, during fsync() the node can be persisted
		 * before the data when the lower device doesn't support write
		 * barriers, resulting in data corruption after SPO.
		 * So in strict fsync mode, force atomic write semantics to
		 * keep the write order between data/node and the last node
		 * and avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, write ordering is already guaranteed, so
	 * here we don't need to wait for node write completion: the node
	 * chain serializes node blocks. If one of the node writes is
	 * reordered, we simply see a broken chain and stop roll-forward
	 * recovery there, i.e. we recover all or none of the node blocks
	 * given an fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

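/*
 * Flow of f2fs_do_sync_file() in short: data pages are flushed by
 * file_write_and_wait_range(); if need_do_checkpoint() finds a reason, a
 * full checkpoint is issued, otherwise only the dirty node pages are
 * written for roll-forward recovery and a cache flush is sent unless
 * fsync_mode=nobarrier. APPEND_INO/UPDATE_INO entries record which inodes
 * still carry unrecovered data.
 */
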
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static bool __found_offset(struct address_space *mapping, block_t blkaddr,
					pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

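/*
 * f2fs_seek_block() walks dnode blocks and classifies each address through
 * __found_offset(): a valid blkaddr is data, NEW_ADDR counts as data only
 * while the page is still dirty, and NULL_ADDR is a hole. A userspace
 * sketch (hypothetical fd, plain lseek(2) semantics):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// first data offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that extent
 */
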
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts at a cluster boundary */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}

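/*
 * For compressed inodes the loop above works per cluster: valid_blocks is
 * accumulated while inside a compressed cluster and folded back into
 * i_compr_blocks via f2fs_i_compr_blocks_update() each time a cluster
 * boundary is crossed, so released clusters are not double counted.
 */
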
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for a compressed file, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return err;
}

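/*
 * Compressed files are thus truncated in two steps: f2fs_do_truncate_blocks()
 * handles everything from the rounded-up cluster boundary, then
 * f2fs_truncate_partial_cluster() deals with the leftover blocks inside
 * the cluster that contains the new EOF.
 */
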
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) &&
			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE)) {
		if (!f2fs_is_compress_backend_ready(inode))
			return -EOPNOTSUPP;
		if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
			!IS_ALIGNED(attr->ia_size,
			F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
			return -EINVAL;
	}

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write, so
			 * that the inline flag never describes data larger
			 * than the inline_data size.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

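/*
 * fill_zero() pins (or allocates) the data page covering @index and zeroes
 * [start, start + len); punch_hole() and f2fs_zero_range() below rely on it
 * for the unaligned head and tail of their ranges.
 */
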
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_wait_on_page_writeback(pdst, DATA, true, true);

			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

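/*
 * __exchange_data_block() moves blocks in three phases per chunk of
 * 4 * ADDRS_PER_BLOCK entries: __read_out_blkaddrs() snapshots the source
 * addresses (detaching the ones that are not checkpointed yet),
 * __clone_blkaddrs() rebinds or copies them at the destination, and
 * __roll_back_blkaddrs() restores the source mapping if anything fails.
 */
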
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}

		if (dn->data_blkaddr == NEW_ADDR)
			continue;

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			break;
		}

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		dn->data_blkaddr = NEW_ADDR;
		f2fs_set_data_blkaddr(dn);
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

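/*
 * Zeroing an allocated range never writes zeroes to disk: existing block
 * addresses are invalidated and replaced with NEW_ADDR, so subsequent reads
 * see zero-filled pages while the old blocks are reclaimed.
 */
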
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

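/*
 * Pinned files are expanded one section at a time from
 * CURSEG_COLD_DATA_PINNED under pin_sem, running foreground GC first when
 * free sections are scarce; regular files take a single
 * F2FS_GET_BLOCK_PRE_AIO mapping call instead.
 */
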
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	/*
	 * Pinned files do not support partial truncation since their blocks
	 * may be in use by applications.
	 */
	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file_modified(file);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

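/*
 * Userspace sketch of the mode dispatch above (hypothetical fd, standard
 * fallocate(2) flags):
 *
 *	fallocate(fd, 0, off, len);				// expand_inode_data()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, off, len);		// punch_hole()
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);	// f2fs_collapse_range()
 */
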
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not
	 * drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process that opened a transaction crashes, we should roll
	 * back; otherwise, other readers/writers can see a corrupted
	 * database until all the writers close the file. Since this must
	 * happen before the file lock is dropped, it has to be done in
	 * ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}

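/*
 * The two helpers above are exact inverses over f2fs_fsflags_map[]; flags
 * without a table entry are silently dropped, and F2FS_GETTABLE_FS_FL /
 * F2FS_SETTABLE_FS_FL further restrict what the ioctls expose or accept.
 */
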
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EINVAL;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly by
	 * f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

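/*
 * Atomic write protocol: F2FS_IOC_START_ATOMIC_WRITE flushes the existing
 * dirty pages, links the inode on inode_list[ATOMIC_FILE] and sets
 * FI_ATOMIC_FILE, after which writes are staged in memory until
 * F2FS_IOC_COMMIT_ATOMIC_WRITE (below) commits them via
 * f2fs_commit_inmem_pages() plus an fsync, or the pages are dropped on
 * abort.
 */
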
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}

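/*
 * Userspace sketch (hypothetical mount point, standard ioctl(2) usage):
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &in);	// checkpoint, then stop I/O
 */
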
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;
		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

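/*
 * Defragmentation above leans on f2fs's out-of-place update: blocks in the
 * range are simply redirtied (at most blk_per_seg at a time) and written
 * back, so the allocator re-places them contiguously in fresh segments.
 * This is also why the ioctl bails out early when the in-place-update
 * policy would be chosen: rewriting in place would not relocate anything.
 */
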
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
		f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);
	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

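/*
 * Lock ordering above: src is always locked first, and dst is taken with
 * trylock (for both the inode lock and i_gc_rwsem), returning -EBUSY rather
 * than risking an ABBA deadlock when two moves run in opposite directions
 * between the same pair of files.
 */
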
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

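/*
 * F2FS_IOC_FLUSH_DEVICE migrates data off one device of a multi-device
 * filesystem by forcing GC over that device's segment range. The last_victim
 * hints are pushed past end_segno so victim selection stays inside the
 * window; at most range.segments segments are handled per call, and the
 * cursor persists in last_victim[FLUSH_DEVICE] between calls.
 */
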
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA

int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (IS_ERR(transfer_to[PRJQUOTA]))
		return PTR_ERR(transfer_to[PRJQUOTA]);

	err = __dquot_transfer(inode, transfer_to);
	if (err)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	dqput(transfer_to[PRJQUOTA]);
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}

#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)

/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}

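/*
 * Both converters walk the same f2fs_xflags_map[] table, so the mapping is
 * bijective by construction: adding one table entry (plus the matching bit
 * in F2FS_SUPPORTED_XFLAGS) wires a new flag up in both directions at once.
 */
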
static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}

int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

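/*
 * Pinned files keep GC from relocating their blocks, and
 * i_gc_failures[GC_FAILURE_PIN] counts how often GC has run into the file.
 * Once that count exceeds gc_pin_file_threshold the pin is dropped above,
 * so a heavily fragmented pinned file cannot stall cleaning indefinitely.
 */
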
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return 0;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(filp, block_count);
}

static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kfree(vbuf);
	return err;
}

static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}

static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
	return put_user(blocks, (u64 __user *)arg);
}

static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}

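/*
 * Cluster layout assumed above: the first address slot of a compressed
 * cluster holds the COMPRESS_ADDR marker, followed by i_cluster_size - 1
 * data slots. Slots with valid addresses hold compressed data and are kept;
 * NEW_ADDR slots are reserved-but-unwritten space savings, so they are
 * turned into NULL_ADDR and their count is returned to the caller as the
 * number of released blocks.
 */
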
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int writecount;
	int ret;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (!f2fs_compressed_file(inode) ||
		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_COMPRESS_RELEASED);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			f2fs_unlock_op(sbi);
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		f2fs_unlock_op(sbi);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}

static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			/*
			 * The compressed cluster may not have been released if
			 * release_compress_blocks() failed partway, so NEW_ADDR
			 * is a possible case here.
			 */
			if (blkaddr == NEW_ADDR ||
				__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;

		/* for the case all blocks in cluster were reserved */
		if (reserved == 1)
			goto next;

		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}

static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int reserved_blocks = 0;
	int ret;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (!f2fs_compressed_file(inode) ||
		!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto unlock_inode;
	}

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			f2fs_unlock_op(sbi);
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = reserve_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		f2fs_unlock_op(sbi);

		if (ret < 0)
			break;

		page_idx += count;
		reserved_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (ret >= 0) {
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
		inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
unlock_inode:
	inode_unlock(inode);
out:
	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(reserved_blocks, (u64 __user *)arg);
	} else if (reserved_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			reserved_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}

static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
		pgoff_t off, block_t block, block_t len, u32 flags)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t sector = SECTOR_FROM_BLOCK(block);
	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (flags & F2FS_TRIM_FILE_DISCARD)
		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
						blk_queue_secure_erase(q) ?
						BLKDEV_DISCARD_SECURE : 0);

	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
		if (IS_ENCRYPTED(inode))
			ret = fscrypt_zeroout_range(inode, off, block, len);
		else
			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					GFP_NOFS, 0);
	}

	return ret;
}

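/*
 * The two F2FS_TRIM_FILE flags can be combined: discard is issued first
 * (as a secure discard when the queue supports it), and zeroout runs only
 * if the discard succeeded. For encrypted inodes the zeroing goes through
 * fscrypt_zeroout_range(), so what lands on disk is the ciphertext of zero
 * pages rather than literal zeroes.
 */
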
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi,
								blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

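/*
 * The walk above batches physically contiguous blocks: an erase is only
 * submitted when the (bdev, file offset, block address) run breaks, plus
 * once at the end for the final run, keeping the number of bios
 * proportional to the number of extents rather than the number of blocks.
 */
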
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	default:
		return -ENOTTY;
	}
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}

static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

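/*
 * Preallocation note for the path above: when blocks were preallocated for
 * the whole write but __generic_file_write_iter() came up short, i_size
 * stays below target_size and f2fs_truncate() trims the unused preallocated
 * blocks, so a failed or partial write does not leak space.
 */
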
#ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}

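/*
 * The compat_f2fs_*_range structs above exist because the native structs
 * embed u64 fields whose alignment (and hence sizeof, which is baked into
 * the ioctl number) differs between 32-bit and 64-bit ABIs. The handlers
 * therefore copy field by field with get_user() instead of doing a single
 * copy_from_user() of the native layout.
 */
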
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};