GNU Linux-libre 6.8.7-gnu
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

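/*
 * Read fault handler: delegate to filemap_fault() and, if the fault
 * returned a locked page, account it as an mmapped application read.
 */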
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_flags_t flags = vmf->vma->vm_flags;
        vm_fault_t ret;

        ret = filemap_fault(vmf);
        if (ret & VM_FAULT_LOCKED)
                f2fs_update_iostat(F2FS_I_SB(inode), inode,
                                        APP_MAPPED_READ_IO, F2FS_BLKSIZE);

        trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);

        return ret;
}

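/*
 * Write fault handler: makes the faulted page writable. It may allocate a
 * block for the page (unless the page sits in a compressed cluster), waits
 * for any in-flight writeback, and zeroes the portion of the page beyond
 * EOF before dirtying it.
 */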
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        bool need_alloc = true;
        int err = 0;
        vm_fault_t ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return VM_FAULT_SIGBUS;

        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
                err = -EIO;
                goto out;
        }

        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto out;
        }

        if (!f2fs_is_checkpoint_ready(sbi)) {
                err = -ENOSPC;
                goto out;
        }

        err = f2fs_convert_inline_inode(inode);
        if (err)
                goto out;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
                int ret = f2fs_is_compressed_cluster(inode, page->index);

                if (ret < 0) {
                        err = ret;
                        goto out;
                } else if (ret) {
                        need_alloc = false;
                }
        }
#endif
        /* should be done outside of any locked page */
        if (need_alloc)
                f2fs_balance_fs(sbi, true);

        sb_start_pagefault(inode->i_sb);

        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

        file_update_time(vmf->vma->vm_file);
        filemap_invalidate_lock_shared(inode->i_mapping);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page))) {
                unlock_page(page);
                err = -EFAULT;
                goto out_sem;
        }

        if (need_alloc) {
                /* block allocation */
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_block_locked(&dn, page->index);
        }

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (!need_alloc) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
                f2fs_put_dnode(&dn);
        }
#endif
        if (err) {
                unlock_page(page);
                goto out_sem;
        }

        f2fs_wait_on_page_writeback(page, DATA, false, true);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto out_sem;

        /* page is wholly or partially inside EOF */
        if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
                                                i_size_read(inode)) {
                loff_t offset;

                offset = i_size_read(inode) & ~PAGE_MASK;
                zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);

        f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
        f2fs_update_time(sbi, REQ_TIME);

out_sem:
        filemap_invalidate_unlock_shared(inode->i_mapping);

        sb_end_pagefault(inode->i_sb);
out:
        ret = vmf_fs_error(err);

        trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
        return ret;
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = f2fs_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
        struct dentry *dentry;

        /*
         * Make sure to get the non-deleted alias.  The alias associated with
         * the open file descriptor being fsync()'ed may be deleted already.
         */
        dentry = d_find_alias(inode);
        if (!dentry)
                return 0;

        *pino = parent_ino(dentry);
        dput(dentry);
        return 1;
}

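/*
 * Decide whether this fsync can be handled by roll-forward recovery alone
 * or must trigger a full checkpoint, returning the reason for tracing.
 */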
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum cp_reason_type cp_reason = CP_NO_NEEDED;

        if (!S_ISREG(inode->i_mode))
                cp_reason = CP_NON_REGULAR;
        else if (f2fs_compressed_file(inode))
                cp_reason = CP_COMPRESSED;
        else if (inode->i_nlink != 1)
                cp_reason = CP_HARDLINK;
        else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
                cp_reason = CP_SB_NEED_CP;
        else if (file_wrong_pino(inode))
                cp_reason = CP_WRONG_PINO;
        else if (!f2fs_space_for_roll_forward(sbi))
                cp_reason = CP_NO_SPC_ROLL;
        else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                cp_reason = CP_NODE_NEED_CP;
        else if (test_opt(sbi, FASTBOOT))
                cp_reason = CP_FASTBOOT_MODE;
        else if (F2FS_OPTION(sbi).active_logs == 2)
                cp_reason = CP_SPEC_LOG_NUM;
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
                f2fs_need_dentry_mark(sbi, inode->i_ino) &&
                f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
                                                        TRANS_DIR_INO))
                cp_reason = CP_RECOVER_DIR;

        return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
        bool ret = false;
        /* we need to detect whether any inode updates are still pending */
        if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
                ret = true;
        f2fs_put_page(i, 0);
        return ret;
}

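/*
 * Repair a stale parent inode number: if the inode has a single link and a
 * live alias, rewrite i_pino from the parent found via the dcache.
 */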
static void try_to_fix_pino(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t pino;

        f2fs_down_write(&fi->i_sem);
        if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                        get_parent_ino(inode, &pino)) {
                f2fs_i_pino_write(inode, pino);
                file_got_pino(inode);
        }
        f2fs_up_write(&fi->i_sem);
}

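/*
 * Core fsync path: write back dirty data, then either issue a full
 * checkpoint (when roll-forward recovery cannot cover this inode) or
 * persist the node chain plus a cache flush so the fsync mark survives
 * a sudden power-off.
 */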
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
                                                int datasync, bool atomic)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t ino = inode->i_ino;
        int ret = 0;
        enum cp_reason_type cp_reason = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        unsigned int seq_id = 0;

        if (unlikely(f2fs_readonly(inode->i_sb)))
                return 0;

        trace_f2fs_sync_file_enter(inode);

        if (S_ISDIR(inode->i_mode))
                goto go_write;

        /* if fdatasync is triggered, let's do in-place-update */
        if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
                set_inode_flag(inode, FI_NEED_IPU);
        ret = file_write_and_wait_range(file, start, end);
        clear_inode_flag(inode, FI_NEED_IPU);

        if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
                trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
                return ret;
        }

        /* if the inode is dirty, let's recover all the time */
        if (!f2fs_skip_inode_update(inode, datasync)) {
                f2fs_write_inode(inode, NULL);
                goto go_write;
        }

        /*
         * if there is no written data, don't waste time writing recovery info.
         */
        if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
                        !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

                /* it may call write_inode just prior to fsync */
                if (need_inode_page_update(sbi, ino))
                        goto go_write;

                if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
                                f2fs_exist_written_data(sbi, ino, UPDATE_INO))
                        goto flush_out;
                goto out;
        } else {
                /*
                 * For the OPU case, during fsync() the node can be persisted
                 * before the data when the lower device doesn't support write
                 * barriers, resulting in data corruption after SPO.
                 * So for strict fsync mode, force atomic write semantics to
                 * keep the write order between data/node and the last node,
                 * avoiding potential data corruption.
                 */
                if (F2FS_OPTION(sbi).fsync_mode ==
                                FSYNC_MODE_STRICT && !atomic)
                        atomic = true;
        }
go_write:
        /*
         * Both fdatasync() and fsync() are able to be recovered after a
         * sudden power-off.
         */
        f2fs_down_read(&F2FS_I(inode)->i_sem);
        cp_reason = need_do_checkpoint(inode);
        f2fs_up_read(&F2FS_I(inode)->i_sem);

        if (cp_reason) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);

                /*
                 * We've secured consistency through sync_fs. Following pino
                 * will be used only for fsynced inodes after checkpoint.
                 */
                try_to_fix_pino(inode);
                clear_inode_flag(inode, FI_APPEND_WRITE);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                goto out;
        }
sync_nodes:
        atomic_inc(&sbi->wb_sync_req[NODE]);
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;

        /* if cp_error was enabled, we should avoid an infinite loop */
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto out;
        }

        if (f2fs_need_inode_block_update(sbi, ino)) {
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_write_inode(inode, NULL);
                goto sync_nodes;
        }

        /*
         * If it's an atomic write, it's just fine to keep the write ordering.
         * So here we don't need to wait for node write completion, since we
         * use a node chain which serializes the node blocks. If one of the
         * node writes is reordered, we simply see a broken chain, which stops
         * roll-forward recovery: we recover either all or none of the node
         * blocks covered by the fsync mark.
         */
        if (!atomic) {
                ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
                if (ret)
                        goto out;
        }

        /* once recovery info is written, don't need to track this */
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
        clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
        if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
            (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
                ret = f2fs_issue_flush(sbi, inode->i_ino);
        if (!ret) {
                f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
        }
        f2fs_update_time(sbi, REQ_TIME);
out:
        trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
        return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
                return -EIO;
        return f2fs_do_sync_file(file, start, end, datasync, false);
}

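/*
 * For SEEK_DATA/SEEK_HOLE: report whether the block address at @index
 * counts as data (valid, or newly reserved and dirty in the page cache)
 * or as a hole (unallocated).
 */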
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
                                pgoff_t index, int whence)
{
        switch (whence) {
        case SEEK_DATA:
                if (__is_valid_data_blkaddr(blkaddr))
                        return true;
                if (blkaddr == NEW_ADDR &&
                    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
                        return true;
                break;
        case SEEK_HOLE:
                if (blkaddr == NULL_ADDR)
                        return true;
                break;
        }
        return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;
        struct dnode_of_data dn;
        pgoff_t pgofs, end_offset;
        loff_t data_ofs = offset;
        loff_t isize;
        int err = 0;

        inode_lock_shared(inode);

        isize = i_size_read(inode);
        if (offset >= isize)
                goto fail;

        /* handle inline data case */
        if (f2fs_has_inline_data(inode)) {
                if (whence == SEEK_HOLE) {
                        data_ofs = isize;
                        goto found;
                } else if (whence == SEEK_DATA) {
                        data_ofs = offset;
                        goto found;
                }
        }

        pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

        for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
                if (err && err != -ENOENT) {
                        goto fail;
                } else if (err == -ENOENT) {
                        /* direct node does not exist */
                        if (whence == SEEK_DATA) {
                                pgofs = f2fs_get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
                                dn.ofs_in_node++, pgofs++,
                                data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                        block_t blkaddr;

                        blkaddr = f2fs_data_blkaddr(&dn);

                        if (__is_valid_data_blkaddr(blkaddr) &&
                                !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                        blkaddr, DATA_GENERIC_ENHANCE)) {
                                f2fs_put_dnode(&dn);
                                goto fail;
                        }

                        if (__found_offset(file->f_mapping, blkaddr,
                                                        pgofs, whence)) {
                                f2fs_put_dnode(&dn);
                                goto found;
                        }
                }
                f2fs_put_dnode(&dn);
        }

        if (whence == SEEK_DATA)
                goto fail;
found:
        if (whence == SEEK_HOLE && data_ofs > isize)
                data_ofs = isize;
        inode_unlock_shared(inode);
        return vfs_setpos(file, data_ofs, maxbytes);
fail:
        inode_unlock_shared(inode);
        return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;

        if (f2fs_compressed_file(inode))
                maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
        case SEEK_HOLE:
                if (offset < 0)
                        return -ENXIO;
                return f2fs_seek_block(file, offset, whence);
        }

        return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;

        f2fs_down_read(&F2FS_I(inode)->i_sem);
        set_inode_flag(inode, FI_MMAP_FILE);
        f2fs_up_read(&F2FS_I(inode)->i_sem);

        return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
        int err = fscrypt_file_open(inode, filp);

        if (err)
                return err;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = fsverity_file_open(inode, filp);
        if (err)
                return err;

        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
        filp->f_mode |= FMODE_CAN_ODIRECT;

        return dquot_file_open(inode, filp);
}

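/*
 * Invalidate @count block addresses starting at dn->ofs_in_node, keeping
 * the per-inode compressed-block count and the extent caches in sync.
 */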
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        int nr_free = 0, ofs = dn->ofs_in_node, len = count;
        __le32 *addr;
        bool compressed_cluster = false;
        int cluster_index = 0, valid_blocks = 0;
        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
        bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

        addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;

        /* Assumption: truncation starts at a cluster-aligned offset */
        for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
                block_t blkaddr = le32_to_cpu(*addr);

                if (f2fs_compressed_file(dn->inode) &&
                                        !(cluster_index & (cluster_size - 1))) {
                        if (compressed_cluster)
                                f2fs_i_compr_blocks_update(dn->inode,
                                                        valid_blocks, false);
                        compressed_cluster = (blkaddr == COMPRESS_ADDR);
                        valid_blocks = 0;
                }

                if (blkaddr == NULL_ADDR)
                        continue;

                f2fs_set_data_blkaddr(dn, NULL_ADDR);

                if (__is_valid_data_blkaddr(blkaddr)) {
                        if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                                        DATA_GENERIC_ENHANCE))
                                continue;
                        if (compressed_cluster)
                                valid_blocks++;
                }

                f2fs_invalidate_blocks(sbi, blkaddr);

                if (!released || blkaddr != COMPRESS_ADDR)
                        nr_free++;
        }

        if (compressed_cluster)
                f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

        if (nr_free) {
                pgoff_t fofs;
                /*
                 * once we invalidate valid blkaddr in range [ofs, ofs + count],
                 * we will invalidate all blkaddr in the whole range.
                 */
                fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
                                                        dn->inode) + ofs;
                f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
                f2fs_update_age_extent_cache_range(dn, fofs, len);
                dec_valid_block_count(sbi, dn->inode, nr_free);
        }
        dn->ofs_in_node = ofs;

        f2fs_update_time(sbi, REQ_TIME);
        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                         dn->ofs_in_node, nr_free);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
                                                                bool cache_only)
{
        loff_t offset = from & (PAGE_SIZE - 1);
        pgoff_t index = from >> PAGE_SHIFT;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (!offset && !cache_only)
                return 0;

        if (cache_only) {
                page = find_lock_page(mapping, index);
                if (page && PageUptodate(page))
                        goto truncate_out;
                f2fs_put_page(page, 1);
                return 0;
        }

        page = f2fs_get_lock_data_page(inode, index, true);
        if (IS_ERR(page))
                return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, offset, PAGE_SIZE - offset);

        /* An encrypted inode should have a key and truncate the last page. */
        f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
        if (!cache_only)
                set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

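/*
 * Truncate all blocks from block-aligned @from onwards: drop inline data,
 * free data blocks in the boundary dnode, prune the node tree past it, and
 * finally zero the partial page at @from.
 */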
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0, err = 0;
        struct page *ipage;
        bool truncate_page = false;

        trace_f2fs_truncate_blocks_enter(inode, from);

        free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

        if (free_from >= max_file_blocks(inode))
                goto free_partial;

        if (lock)
                f2fs_lock_op(sbi);

        ipage = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        if (f2fs_has_inline_data(inode)) {
                f2fs_truncate_inline_inode(inode, ipage, from);
                f2fs_put_page(ipage, 1);
                truncate_page = true;
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                goto out;
        }

        count = ADDRS_PER_PAGE(dn.node_page, inode);

        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);

        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                f2fs_truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = f2fs_truncate_inode_blocks(inode, free_from);
out:
        if (lock)
                f2fs_unlock_op(sbi);
free_partial:
        /* lastly zero out the partial data page containing the new EOF */
        if (!err)
                err = truncate_partial_data_page(inode, from, truncate_page);

        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        u64 free_from = from;
        int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /*
         * for a compressed file, only cluster-size-aligned
         * truncation is supported.
         */
        if (f2fs_compressed_file(inode))
                free_from = round_up(from,
                                F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

        err = f2fs_do_truncate_blocks(inode, free_from, lock);
        if (err)
                return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /*
         * For a compressed file, direct write is disallowed once its
         * compressed blocks have been released, but it should be allowed
         * again after the file is truncated to zero.
         */
        if (f2fs_compressed_file(inode) && !free_from
                        && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
                clear_inode_flag(inode, FI_COMPRESS_RELEASED);

        if (from != free_from) {
                err = f2fs_truncate_partial_cluster(inode, from, lock);
                if (err)
                        return err;
        }
#endif

        return 0;
}

int f2fs_truncate(struct inode *inode)
{
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return 0;

        trace_f2fs_truncate(inode);

        if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))
                return -EIO;

        err = f2fs_dquot_initialize(inode);
        if (err)
                return err;

        /* we should check inline_data size */
        if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
        if (err)
                return err;

        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        f2fs_mark_inode_dirty_sync(inode, false);
        return 0;
}

static bool f2fs_force_buffered_io(struct inode *inode, int rw)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (!fscrypt_dio_supported(inode))
                return true;
        if (fsverity_active(inode))
                return true;
        if (f2fs_compressed_file(inode))
                return true;

        /* disallow direct IO if any of the devices has an unaligned blksize */
        if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
                return true;
        /*
         * for a zoned block device, fall back from direct IO to buffered IO,
         * so that all IOs can be serialized by the log-structured write path.
         */
        if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
                return true;
        if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
                return true;
        if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
                return true;

        return false;
}

int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
                 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri = NULL;
        unsigned int flags;

        if (f2fs_has_extra_attr(inode) &&
                        f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                stat->result_mask |= STATX_BTIME;
                stat->btime.tv_sec = fi->i_crtime.tv_sec;
                stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
        }

        /*
         * Return the DIO alignment restrictions if requested.  We only return
         * this information when requested, since on encrypted files it might
         * take a fair bit of work to get if the file wasn't opened recently.
         *
         * f2fs sometimes supports DIO reads but not DIO writes.  STATX_DIOALIGN
         * cannot represent that, so in that case we report no DIO support.
         */
        if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
                unsigned int bsize = i_blocksize(inode);

                stat->result_mask |= STATX_DIOALIGN;
                if (!f2fs_force_buffered_io(inode, WRITE)) {
                        stat->dio_mem_align = bsize;
                        stat->dio_offset_align = bsize;
                }
        }

        flags = fi->i_flags;
        if (flags & F2FS_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (flags & F2FS_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (IS_ENCRYPTED(inode))
                stat->attributes |= STATX_ATTR_ENCRYPTED;
        if (flags & F2FS_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (flags & F2FS_NODUMP_FL)
                stat->attributes |= STATX_ATTR_NODUMP;
        if (IS_VERITY(inode))
                stat->attributes |= STATX_ATTR_VERITY;

        stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_APPEND |
                                  STATX_ATTR_ENCRYPTED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP |
                                  STATX_ATTR_VERITY);

        generic_fillattr(idmap, request_mask, inode, stat);

        /* we need to show initial sectors used for inline_data/dentries */
        if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
                                        f2fs_has_inline_dentry(inode))
                stat->blocks += (stat->size + 511) >> 9;

        return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct mnt_idmap *idmap,
                           struct inode *inode, const struct iattr *attr)
{
        unsigned int ia_valid = attr->ia_valid;

        i_uid_update(idmap, attr, inode);
        i_gid_update(idmap, attr, inode);
        if (ia_valid & ATTR_ATIME)
                inode_set_atime_to_ts(inode, attr->ia_atime);
        if (ia_valid & ATTR_MTIME)
                inode_set_mtime_to_ts(inode, attr->ia_mtime);
        if (ia_valid & ATTR_CTIME)
                inode_set_ctime_to_ts(inode, attr->ia_ctime);
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
                vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

                if (!vfsgid_in_group_p(vfsgid) &&
                    !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(inode, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                 struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        if (unlikely(IS_APPEND(inode) &&
                        (attr->ia_valid & (ATTR_MODE | ATTR_UID |
                                  ATTR_GID | ATTR_TIMES_SET))))
                return -EPERM;

        if ((attr->ia_valid & ATTR_SIZE) &&
                !f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = setattr_prepare(idmap, dentry, attr);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        err = fsverity_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if (is_quota_modification(idmap, inode, attr)) {
                err = f2fs_dquot_initialize(inode);
                if (err)
                        return err;
        }
        if (i_uid_needs_update(idmap, attr, inode) ||
            i_gid_needs_update(idmap, attr, inode)) {
                f2fs_lock_op(F2FS_I_SB(inode));
                err = dquot_transfer(idmap, inode, attr);
                if (err) {
                        set_sbi_flag(F2FS_I_SB(inode),
                                        SBI_QUOTA_NEED_REPAIR);
                        f2fs_unlock_op(F2FS_I_SB(inode));
                        return err;
                }
                /*
                 * update uid/gid under lock_op(), so that dquot and inode can
                 * be updated atomically.
                 */
                i_uid_update(idmap, attr, inode);
                i_gid_update(idmap, attr, inode);
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_unlock_op(F2FS_I_SB(inode));
        }

        if (attr->ia_valid & ATTR_SIZE) {
                loff_t old_size = i_size_read(inode);

                if (attr->ia_size > MAX_INLINE_DATA(inode)) {
                        /*
                         * convert the inline inode before i_size_write() so
                         * that i_size can't exceed the inline_data capacity
                         * while the inline flag is still set.
                         */
                        err = f2fs_convert_inline_inode(inode);
                        if (err)
                                return err;
                }

                f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                filemap_invalidate_lock(inode->i_mapping);

                truncate_setsize(inode, attr->ia_size);

                if (attr->ia_size <= old_size)
                        err = f2fs_truncate(inode);
                /*
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
                filemap_invalidate_unlock(inode->i_mapping);
                f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;

                spin_lock(&F2FS_I(inode)->i_size_lock);
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                F2FS_I(inode)->last_disk_size = i_size_read(inode);
                spin_unlock(&F2FS_I(inode)->i_size_lock);
        }

        __setattr_copy(idmap, inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));

                if (is_inode_flag_set(inode, FI_ACL_MODE)) {
                        if (!err)
                                inode->i_mode = F2FS_I(inode)->i_acl_mode;
                        clear_inode_flag(inode, FI_ACL_MODE);
                }
        }

        /* file size may have changed here */
        f2fs_mark_inode_dirty_sync(inode, true);

        /* inode change will produce dirty node pages flushed by checkpoint */
        f2fs_balance_fs(F2FS_I_SB(inode), true);

        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_inode_acl  = f2fs_get_acl,
        .set_acl        = f2fs_set_acl,
        .listxattr      = f2fs_listxattr,
        .fiemap         = f2fs_fiemap,
        .fileattr_get   = f2fs_fileattr_get,
        .fileattr_set   = f2fs_fileattr_set,
};

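/*
 * Zero @len bytes of the page at @index, starting at @start, allocating a
 * new data page there if the inode doesn't have one yet.
 */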
static int fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page;

        if (!len)
                return 0;

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);
        page = f2fs_get_new_data_page(inode, NULL, index, false);
        f2fs_unlock_op(sbi);

        if (IS_ERR(page))
                return PTR_ERR(page);

        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        int err;

        while (pg_start < pg_end) {
                struct dnode_of_data dn;
                pgoff_t end_offset, count;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
                if (err) {
                        if (err == -ENOENT) {
                                pg_start = f2fs_get_next_page_offset(&dn,
                                                                pg_start);
                                continue;
                        }
                        return err;
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

                f2fs_truncate_data_blocks_range(&dn, count);
                f2fs_put_dnode(&dn);

                pg_start += count;
        }
        return 0;
}

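/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range
 * and free all whole blocks in between.
 */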
static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
                }
                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                return ret;
                }

                if (pg_start < pg_end) {
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                        f2fs_balance_fs(sbi, true);

                        blk_start = (loff_t)pg_start << PAGE_SHIFT;
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;

                        f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        filemap_invalidate_lock(inode->i_mapping);

                        truncate_pagecache_range(inode, blk_start, blk_end - 1);

                        f2fs_lock_op(sbi);
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);

                        filemap_invalidate_unlock(inode->i_mapping);
                        f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }

        return ret;
}

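/*
 * Snapshot @len block addresses starting at @off into @blkaddr. Blocks not
 * covered by the last checkpoint are detached (set to NULL_ADDR) and marked
 * in @do_replace so they can be moved or rolled back later.
 */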
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, pgoff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, done, i;

next_dnode:
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
        if (ret && ret != -ENOENT) {
                return ret;
        } else if (ret == -ENOENT) {
                if (dn.max_level == 0)
                        return -ENOENT;
                done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
                                                dn.ofs_in_node, len);
                blkaddr += done;
                do_replace += done;
                goto next;
        }

        done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
                                                        dn.ofs_in_node, len);
        for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
                *blkaddr = f2fs_data_blkaddr(&dn);

                if (__is_valid_data_blkaddr(*blkaddr) &&
                        !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                                        DATA_GENERIC_ENHANCE)) {
                        f2fs_put_dnode(&dn);
                        f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
                        return -EFSCORRUPTED;
                }

                if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

                        if (f2fs_lfs_mode(sbi)) {
                                f2fs_put_dnode(&dn);
                                return -EOPNOTSUPP;
                        }

                        /* do not invalidate this block address */
                        f2fs_update_data_blkaddr(&dn, NULL_ADDR);
                        *do_replace = 1;
                }
        }
        f2fs_put_dnode(&dn);
next:
        len -= done;
        off += done;
        if (len)
                goto next_dnode;
        return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, int len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, i;

        for (i = 0; i < len; i++, do_replace++, blkaddr++) {
                if (*do_replace == 0)
                        continue;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
                if (ret) {
                        dec_valid_block_count(sbi, inode, 1);
                        f2fs_invalidate_blocks(sbi, *blkaddr);
                } else {
                        f2fs_update_data_blkaddr(&dn, *blkaddr);
                }
                f2fs_put_dnode(&dn);
        }
        return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
                        block_t *blkaddr, int *do_replace,
                        pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
        pgoff_t i = 0;
        int ret;

        while (i < len) {
                if (blkaddr[i] == NULL_ADDR && !full) {
                        i++;
                        continue;
                }

                if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
                        struct dnode_of_data dn;
                        struct node_info ni;
                        size_t new_size;
                        pgoff_t ilen;

                        set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
                        if (ret)
                                return ret;

                        ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
                        if (ret) {
                                f2fs_put_dnode(&dn);
                                return ret;
                        }

                        ilen = min((pgoff_t)
                                ADDRS_PER_PAGE(dn.node_page, dst_inode) -
                                                dn.ofs_in_node, len - i);
                        do {
                                dn.data_blkaddr = f2fs_data_blkaddr(&dn);
                                f2fs_truncate_data_blocks_range(&dn, 1);

                                if (do_replace[i]) {
                                        f2fs_i_blocks_write(src_inode,
                                                        1, false, false);
                                        f2fs_i_blocks_write(dst_inode,
                                                        1, true, false);
                                        f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
                                        blkaddr[i], ni.version, true, false);

                                        do_replace[i] = 0;
                                }
                                dn.ofs_in_node++;
                                i++;
                                new_size = (loff_t)(dst + i) << PAGE_SHIFT;
                                if (dst_inode->i_size < new_size)
                                        f2fs_i_size_write(dst_inode, new_size);
                        } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

                        f2fs_put_dnode(&dn);
                } else {
                        struct page *psrc, *pdst;

                        psrc = f2fs_get_lock_data_page(src_inode,
                                                        src + i, true);
                        if (IS_ERR(psrc))
                                return PTR_ERR(psrc);
                        pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
                                                                true);
                        if (IS_ERR(pdst)) {
                                f2fs_put_page(psrc, 1);
                                return PTR_ERR(pdst);
                        }
                        memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
                        set_page_dirty(pdst);
                        set_page_private_gcing(pdst);
                        f2fs_put_page(pdst, 1);
                        f2fs_put_page(psrc, 1);

                        ret = f2fs_truncate_hole(src_inode,
                                                src + i, src + i + 1);
                        if (ret)
                                return ret;
                        i++;
                }
        }
        return 0;
}

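/*
 * Move @len blocks from @src in @src_inode to @dst in @dst_inode, working
 * in batches of 4 * ADDRS_PER_BLOCK() addresses; on failure, roll the
 * already-detached block addresses back into place.
 */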
static int __exchange_data_block(struct inode *src_inode,
                        struct inode *dst_inode, pgoff_t src, pgoff_t dst,
                        pgoff_t len, bool full)
{
        block_t *src_blkaddr;
        int *do_replace;
        pgoff_t olen;
        int ret;

        while (len) {
                olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

                src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(block_t)),
                                        GFP_NOFS);
                if (!src_blkaddr)
                        return -ENOMEM;

                do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(int)),
                                        GFP_NOFS);
                if (!do_replace) {
                        kvfree(src_blkaddr);
                        return -ENOMEM;
                }

                ret = __read_out_blkaddrs(src_inode, src_blkaddr,
                                        do_replace, src, olen);
                if (ret)
                        goto roll_back;

                ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
                                        do_replace, src, dst, olen, full);
                if (ret)
                        goto roll_back;

                src += olen;
                dst += olen;
                len -= olen;

                kvfree(src_blkaddr);
                kvfree(do_replace);
        }
        return 0;

roll_back:
        __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
        kvfree(src_blkaddr);
        kvfree(do_replace);
        return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
        int ret;

        f2fs_balance_fs(sbi, true);

        /* avoid gc operation during block exchange */
        f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        filemap_invalidate_lock(inode->i_mapping);

        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
        truncate_pagecache(inode, offset);
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);

        filemap_invalidate_unlock(inode->i_mapping);
        f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
        loff_t new_size;
        int ret;

        if (offset + len >= i_size_read(inode))
                return -EINVAL;

        /* the collapse range must be aligned to the f2fs block size */
        if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
                return -EINVAL;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        if (ret)
                return ret;

        ret = f2fs_do_collapse(inode, offset, len);
        if (ret)
                return ret;

        /* write out all moved pages, if possible */
        filemap_invalidate_lock(inode->i_mapping);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);

        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
        filemap_invalidate_unlock(inode->i_mapping);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
}

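/*
 * Reserve blocks for the unallocated part of [start, end), then release
 * any existing blocks in the range and remap them as NEW_ADDR (reserved
 * but unwritten), keeping the extent caches in sync.
 */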
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
                                                                pgoff_t end)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        pgoff_t index = start;
        unsigned int ofs_in_node = dn->ofs_in_node;
        blkcnt_t count = 0;
        int ret;

        for (; index < end; index++, dn->ofs_in_node++) {
                if (f2fs_data_blkaddr(dn) == NULL_ADDR)
                        count++;
        }

        dn->ofs_in_node = ofs_in_node;
        ret = f2fs_reserve_new_blocks(dn, count);
        if (ret)
                return ret;

        dn->ofs_in_node = ofs_in_node;
        for (index = start; index < end; index++, dn->ofs_in_node++) {
                dn->data_blkaddr = f2fs_data_blkaddr(dn);
1467                 /*
1468                  * f2fs_reserve_new_blocks() does not guarantee that every
1469                  * requested block was actually allocated.
1470                  */
1471                 if (dn->data_blkaddr == NULL_ADDR) {
1472                         ret = -ENOSPC;
1473                         break;
1474                 }
1475
1476                 if (dn->data_blkaddr == NEW_ADDR)
1477                         continue;
1478
1479                 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1480                                         DATA_GENERIC_ENHANCE)) {
1481                         ret = -EFSCORRUPTED;
1482                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1483                         break;
1484                 }
1485
1486                 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1487                 f2fs_set_data_blkaddr(dn, NEW_ADDR);
1488         }
1489
1490         f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1491         f2fs_update_age_extent_cache_range(dn, start, index - start);
1492
1493         return ret;
1494 }
1495
1496 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1497                                                                 int mode)
1498 {
1499         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1500         struct address_space *mapping = inode->i_mapping;
1501         pgoff_t index, pg_start, pg_end;
1502         loff_t new_size = i_size_read(inode);
1503         loff_t off_start, off_end;
1504         int ret = 0;
1505
1506         ret = inode_newsize_ok(inode, (len + offset));
1507         if (ret)
1508                 return ret;
1509
1510         ret = f2fs_convert_inline_inode(inode);
1511         if (ret)
1512                 return ret;
1513
1514         ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1515         if (ret)
1516                 return ret;
1517
1518         pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1519         pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1520
1521         off_start = offset & (PAGE_SIZE - 1);
1522         off_end = (offset + len) & (PAGE_SIZE - 1);
1523
1524         if (pg_start == pg_end) {
1525                 ret = fill_zero(inode, pg_start, off_start,
1526                                                 off_end - off_start);
1527                 if (ret)
1528                         return ret;
1529
1530                 new_size = max_t(loff_t, new_size, offset + len);
1531         } else {
1532                 if (off_start) {
1533                         ret = fill_zero(inode, pg_start++, off_start,
1534                                                 PAGE_SIZE - off_start);
1535                         if (ret)
1536                                 return ret;
1537
1538                         new_size = max_t(loff_t, new_size,
1539                                         (loff_t)pg_start << PAGE_SHIFT);
1540                 }
1541
1542                 for (index = pg_start; index < pg_end;) {
1543                         struct dnode_of_data dn;
1544                         unsigned int end_offset;
1545                         pgoff_t end;
1546
1547                         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1548                         filemap_invalidate_lock(mapping);
1549
1550                         truncate_pagecache_range(inode,
1551                                 (loff_t)index << PAGE_SHIFT,
1552                                 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1553
1554                         f2fs_lock_op(sbi);
1555
1556                         set_new_dnode(&dn, inode, NULL, NULL, 0);
1557                         ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1558                         if (ret) {
1559                                 f2fs_unlock_op(sbi);
1560                                 filemap_invalidate_unlock(mapping);
1561                                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1562                                 goto out;
1563                         }
1564
1565                         end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1566                         end = min(pg_end, end_offset - dn.ofs_in_node + index);
1567
1568                         ret = f2fs_do_zero_range(&dn, index, end);
1569                         f2fs_put_dnode(&dn);
1570
1571                         f2fs_unlock_op(sbi);
1572                         filemap_invalidate_unlock(mapping);
1573                         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1574
1575                         f2fs_balance_fs(sbi, dn.node_changed);
1576
1577                         if (ret)
1578                                 goto out;
1579
1580                         index = end;
1581                         new_size = max_t(loff_t, new_size,
1582                                         (loff_t)index << PAGE_SHIFT);
1583                 }
1584
1585                 if (off_end) {
1586                         ret = fill_zero(inode, pg_end, 0, off_end);
1587                         if (ret)
1588                                 goto out;
1589
1590                         new_size = max_t(loff_t, new_size, offset + len);
1591                 }
1592         }
1593
1594 out:
1595         if (new_size > i_size_read(inode)) {
1596                 if (mode & FALLOC_FL_KEEP_SIZE)
1597                         file_set_keep_isize(inode);
1598                 else
1599                         f2fs_i_size_write(inode, new_size);
1600         }
1601         return ret;
1602 }
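
/*
 * Illustrative userspace sketch: zeroing a range with fallocate(2).
 * With FALLOC_FL_KEEP_SIZE, i_size is left alone even when the zeroed
 * range extends past EOF (see the out: label above); without it the
 * file grows to offset + len.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int zero_range_keep_size(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif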
1603
1604 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1605 {
1606         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1607         struct address_space *mapping = inode->i_mapping;
1608         pgoff_t nr, pg_start, pg_end, delta, idx;
1609         loff_t new_size;
1610         int ret = 0;
1611
1612         new_size = i_size_read(inode) + len;
1613         ret = inode_newsize_ok(inode, new_size);
1614         if (ret)
1615                 return ret;
1616
1617         if (offset >= i_size_read(inode))
1618                 return -EINVAL;
1619
1620         /* the inserted range must be aligned to the f2fs block size. */
1621         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1622                 return -EINVAL;
1623
1624         ret = f2fs_convert_inline_inode(inode);
1625         if (ret)
1626                 return ret;
1627
1628         f2fs_balance_fs(sbi, true);
1629
1630         filemap_invalidate_lock(mapping);
1631         ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1632         filemap_invalidate_unlock(mapping);
1633         if (ret)
1634                 return ret;
1635
1636         /* write out all dirty pages from offset */
1637         ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1638         if (ret)
1639                 return ret;
1640
1641         pg_start = offset >> PAGE_SHIFT;
1642         pg_end = (offset + len) >> PAGE_SHIFT;
1643         delta = pg_end - pg_start;
1644         idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1645
1646         /* avoid gc operation during block exchange */
1647         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1648         filemap_invalidate_lock(mapping);
1649         truncate_pagecache(inode, offset);
1650
1651         while (!ret && idx > pg_start) {
1652                 nr = idx - pg_start;
1653                 if (nr > delta)
1654                         nr = delta;
1655                 idx -= nr;
1656
1657                 f2fs_lock_op(sbi);
1658                 f2fs_drop_extent_tree(inode);
1659
1660                 ret = __exchange_data_block(inode, inode, idx,
1661                                         idx + delta, nr, false);
1662                 f2fs_unlock_op(sbi);
1663         }
1664         filemap_invalidate_unlock(mapping);
1665         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1666
1667         /* write out all moved pages, if possible */
1668         filemap_invalidate_lock(mapping);
1669         filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1670         truncate_pagecache(inode, offset);
1671         filemap_invalidate_unlock(mapping);
1672
1673         if (!ret)
1674                 f2fs_i_size_write(inode, new_size);
1675         return ret;
1676 }
1677
1678 static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
1679                                         loff_t len, int mode)
1680 {
1681         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1682         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1683                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1684                         .m_may_create = true };
1685         struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1686                         .init_gc_type = FG_GC,
1687                         .should_migrate_blocks = false,
1688                         .err_gc_skipped = true,
1689                         .nr_free_secs = 0 };
1690         pgoff_t pg_start, pg_end;
1691         loff_t new_size;
1692         loff_t off_end;
1693         block_t expanded = 0;
1694         int err;
1695
1696         err = inode_newsize_ok(inode, (len + offset));
1697         if (err)
1698                 return err;
1699
1700         err = f2fs_convert_inline_inode(inode);
1701         if (err)
1702                 return err;
1703
1704         f2fs_balance_fs(sbi, true);
1705
1706         pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1707         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1708         off_end = (offset + len) & (PAGE_SIZE - 1);
1709
1710         map.m_lblk = pg_start;
1711         map.m_len = pg_end - pg_start;
1712         if (off_end)
1713                 map.m_len++;
1714
1715         if (!map.m_len)
1716                 return 0;
1717
1718         if (f2fs_is_pinned_file(inode)) {
1719                 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
1720                 block_t sec_len = roundup(map.m_len, sec_blks);
1721
1722                 map.m_len = sec_blks;
1723 next_alloc:
1724                 if (has_not_enough_free_secs(sbi, 0,
1725                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1726                         f2fs_down_write(&sbi->gc_lock);
1727                         stat_inc_gc_call_count(sbi, FOREGROUND);
1728                         err = f2fs_gc(sbi, &gc_control);
1729                         if (err && err != -ENODATA)
1730                                 goto out_err;
1731                 }
1732
1733                 f2fs_down_write(&sbi->pin_sem);
1734
1735                 f2fs_lock_op(sbi);
1736                 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1737                 f2fs_unlock_op(sbi);
1738
1739                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1740                 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
1741                 file_dont_truncate(inode);
1742
1743                 f2fs_up_write(&sbi->pin_sem);
1744
1745                 expanded += map.m_len;
1746                 sec_len -= map.m_len;
1747                 map.m_lblk += map.m_len;
1748                 if (!err && sec_len)
1749                         goto next_alloc;
1750
1751                 map.m_len = expanded;
1752         } else {
1753                 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
1754                 expanded = map.m_len;
1755         }
1756 out_err:
1757         if (err) {
1758                 pgoff_t last_off;
1759
1760                 if (!expanded)
1761                         return err;
1762
1763                 last_off = pg_start + expanded - 1;
1764
1765                 /* update new size to the failed position */
1766                 new_size = (last_off == pg_end) ? offset + len :
1767                                         (loff_t)(last_off + 1) << PAGE_SHIFT;
1768         } else {
1769                 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1770         }
1771
1772         if (new_size > i_size_read(inode)) {
1773                 if (mode & FALLOC_FL_KEEP_SIZE)
1774                         file_set_keep_isize(inode);
1775                 else
1776                         f2fs_i_size_write(inode, new_size);
1777         }
1778
1779         return err;
1780 }
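
/*
 * Illustrative userspace sketch: preallocating a pinned file. Pinning
 * first makes the allocation above take the CURSEG_COLD_DATA_PINNED
 * path, which rounds each request up to section granularity. Assumes
 * F2FS_IOC_SET_PIN_FILE from <linux/f2fs.h>; error handling trimmed.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int prealloc_pinned(int fd, off_t size)
{
	__u32 pin = 1;

	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
		return -1;
	return fallocate(fd, 0, 0, size);	/* expands via the path above */
}
#endif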
1781
1782 static long f2fs_fallocate(struct file *file, int mode,
1783                                 loff_t offset, loff_t len)
1784 {
1785         struct inode *inode = file_inode(file);
1786         long ret = 0;
1787
1788         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1789                 return -EIO;
1790         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1791                 return -ENOSPC;
1792         if (!f2fs_is_compress_backend_ready(inode))
1793                 return -EOPNOTSUPP;
1794
1795         /* f2fs only supports ->fallocate for regular files */
1796         if (!S_ISREG(inode->i_mode))
1797                 return -EINVAL;
1798
1799         if (IS_ENCRYPTED(inode) &&
1800                 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1801                 return -EOPNOTSUPP;
1802
1803         /*
1804          * Pinned files must not support partial truncation, since their
1805          * blocks may still be in use by applications.
1806          */
1807         if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1808                 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1809                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1810                 return -EOPNOTSUPP;
1811
1812         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1813                         FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1814                         FALLOC_FL_INSERT_RANGE))
1815                 return -EOPNOTSUPP;
1816
1817         inode_lock(inode);
1818
1819         ret = file_modified(file);
1820         if (ret)
1821                 goto out;
1822
1823         if (mode & FALLOC_FL_PUNCH_HOLE) {
1824                 if (offset >= inode->i_size)
1825                         goto out;
1826
1827                 ret = f2fs_punch_hole(inode, offset, len);
1828         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1829                 ret = f2fs_collapse_range(inode, offset, len);
1830         } else if (mode & FALLOC_FL_ZERO_RANGE) {
1831                 ret = f2fs_zero_range(inode, offset, len, mode);
1832         } else if (mode & FALLOC_FL_INSERT_RANGE) {
1833                 ret = f2fs_insert_range(inode, offset, len);
1834         } else {
1835                 ret = f2fs_expand_inode_data(inode, offset, len, mode);
1836         }
1837
1838         if (!ret) {
1839                 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1840                 f2fs_mark_inode_dirty_sync(inode, false);
1841                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1842         }
1843
1844 out:
1845         inode_unlock(inode);
1846
1847         trace_f2fs_fallocate(inode, mode, offset, len, ret);
1848         return ret;
1849 }
1850
1851 static int f2fs_release_file(struct inode *inode, struct file *filp)
1852 {
1853         /*
1854          * f2fs_release_file() is called on every close. Do not drop any
1855          * in-memory pages because of a close issued by another process.
1856          */
1857         if (!(filp->f_mode & FMODE_WRITE) ||
1858                         atomic_read(&inode->i_writecount) != 1)
1859                 return 0;
1860
1861         inode_lock(inode);
1862         f2fs_abort_atomic_write(inode, true);
1863         inode_unlock(inode);
1864
1865         return 0;
1866 }
1867
1868 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1869 {
1870         struct inode *inode = file_inode(file);
1871
1872         /*
1873          * If the process performing a transaction crashes, we must roll it
1874          * back. Otherwise, other readers/writers could see a corrupted
1875          * database until every writer has closed the file. Since this must
1876          * happen before the file lock is dropped, it has to be done in ->flush.
1877          */
1878         if (F2FS_I(inode)->atomic_write_task == current &&
1879                                 (current->flags & PF_EXITING)) {
1880                 inode_lock(inode);
1881                 f2fs_abort_atomic_write(inode, true);
1882                 inode_unlock(inode);
1883         }
1884
1885         return 0;
1886 }
1887
1888 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1889 {
1890         struct f2fs_inode_info *fi = F2FS_I(inode);
1891         u32 masked_flags = fi->i_flags & mask;
1892
1893         /* mask can be shrunk by flags_valid selector */
1894         iflags &= mask;
1895
1896         /* Is it quota file? Do not allow user to mess with it */
1897         if (IS_NOQUOTA(inode))
1898                 return -EPERM;
1899
1900         if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1901                 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1902                         return -EOPNOTSUPP;
1903                 if (!f2fs_empty_dir(inode))
1904                         return -ENOTEMPTY;
1905         }
1906
1907         if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1908                 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1909                         return -EOPNOTSUPP;
1910                 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1911                         return -EINVAL;
1912         }
1913
1914         if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1915                 if (masked_flags & F2FS_COMPR_FL) {
1916                         if (!f2fs_disable_compressed_file(inode))
1917                                 return -EINVAL;
1918                 } else {
1919                         /* try to convert inline_data to support compression */
1920                         int err = f2fs_convert_inline_inode(inode);
1921                         if (err)
1922                                 return err;
1923
1924                         f2fs_down_write(&F2FS_I(inode)->i_sem);
1925                         if (!f2fs_may_compress(inode) ||
1926                                         (S_ISREG(inode->i_mode) &&
1927                                         F2FS_HAS_BLOCKS(inode))) {
1928                                 f2fs_up_write(&F2FS_I(inode)->i_sem);
1929                                 return -EINVAL;
1930                         }
1931                         err = set_compress_context(inode);
1932                         f2fs_up_write(&F2FS_I(inode)->i_sem);
1933
1934                         if (err)
1935                                 return err;
1936                 }
1937         }
1938
1939         fi->i_flags = iflags | (fi->i_flags & ~mask);
1940         f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1941                                         (fi->i_flags & F2FS_NOCOMP_FL));
1942
1943         if (fi->i_flags & F2FS_PROJINHERIT_FL)
1944                 set_inode_flag(inode, FI_PROJ_INHERIT);
1945         else
1946                 clear_inode_flag(inode, FI_PROJ_INHERIT);
1947
1948         inode_set_ctime_current(inode);
1949         f2fs_set_inode_flags(inode);
1950         f2fs_mark_inode_dirty_sync(inode, true);
1951         return 0;
1952 }
1953
1954 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1955
1956 /*
1957  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1958  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1959  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1960  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1961  *
1962  * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1963  * FS_IOC_FSSETXATTR is done by the VFS.
1964  */
1965
1966 static const struct {
1967         u32 iflag;
1968         u32 fsflag;
1969 } f2fs_fsflags_map[] = {
1970         { F2FS_COMPR_FL,        FS_COMPR_FL },
1971         { F2FS_SYNC_FL,         FS_SYNC_FL },
1972         { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1973         { F2FS_APPEND_FL,       FS_APPEND_FL },
1974         { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1975         { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1976         { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1977         { F2FS_INDEX_FL,        FS_INDEX_FL },
1978         { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1979         { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1980         { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1981 };
1982
1983 #define F2FS_GETTABLE_FS_FL (           \
1984                 FS_COMPR_FL |           \
1985                 FS_SYNC_FL |            \
1986                 FS_IMMUTABLE_FL |       \
1987                 FS_APPEND_FL |          \
1988                 FS_NODUMP_FL |          \
1989                 FS_NOATIME_FL |         \
1990                 FS_NOCOMP_FL |          \
1991                 FS_INDEX_FL |           \
1992                 FS_DIRSYNC_FL |         \
1993                 FS_PROJINHERIT_FL |     \
1994                 FS_ENCRYPT_FL |         \
1995                 FS_INLINE_DATA_FL |     \
1996                 FS_NOCOW_FL |           \
1997                 FS_VERITY_FL |          \
1998                 FS_CASEFOLD_FL)
1999
2000 #define F2FS_SETTABLE_FS_FL (           \
2001                 FS_COMPR_FL |           \
2002                 FS_SYNC_FL |            \
2003                 FS_IMMUTABLE_FL |       \
2004                 FS_APPEND_FL |          \
2005                 FS_NODUMP_FL |          \
2006                 FS_NOATIME_FL |         \
2007                 FS_NOCOMP_FL |          \
2008                 FS_DIRSYNC_FL |         \
2009                 FS_PROJINHERIT_FL |     \
2010                 FS_CASEFOLD_FL)
2011
2012 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
2013 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
2014 {
2015         u32 fsflags = 0;
2016         int i;
2017
2018         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2019                 if (iflags & f2fs_fsflags_map[i].iflag)
2020                         fsflags |= f2fs_fsflags_map[i].fsflag;
2021
2022         return fsflags;
2023 }
2024
2025 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
2026 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
2027 {
2028         u32 iflags = 0;
2029         int i;
2030
2031         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2032                 if (fsflags & f2fs_fsflags_map[i].fsflag)
2033                         iflags |= f2fs_fsflags_map[i].iflag;
2034
2035         return iflags;
2036 }
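
/*
 * Illustrative userspace sketch: the FS_IOC_[GS]ETFLAGS counterpart of
 * the mapping above. Read-modify-write the FS_*_FL flags; anything
 * outside F2FS_SETTABLE_FS_FL is rejected or masked on the way in.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int add_fsflag(int fd, int flag)
{
	int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags |= flag;			/* e.g. FS_COMPR_FL or FS_NODUMP_FL */
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}
#endif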
2037
2038 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2039 {
2040         struct inode *inode = file_inode(filp);
2041
2042         return put_user(inode->i_generation, (int __user *)arg);
2043 }
2044
2045 static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
2046 {
2047         struct inode *inode = file_inode(filp);
2048         struct mnt_idmap *idmap = file_mnt_idmap(filp);
2049         struct f2fs_inode_info *fi = F2FS_I(inode);
2050         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2051         struct inode *pinode;
2052         loff_t isize;
2053         int ret;
2054
2055         if (!inode_owner_or_capable(idmap, inode))
2056                 return -EACCES;
2057
2058         if (!S_ISREG(inode->i_mode))
2059                 return -EINVAL;
2060
2061         if (filp->f_flags & O_DIRECT)
2062                 return -EINVAL;
2063
2064         ret = mnt_want_write_file(filp);
2065         if (ret)
2066                 return ret;
2067
2068         inode_lock(inode);
2069
2070         if (!f2fs_disable_compressed_file(inode)) {
2071                 ret = -EINVAL;
2072                 goto out;
2073         }
2074
2075         if (f2fs_is_atomic_file(inode))
2076                 goto out;
2077
2078         ret = f2fs_convert_inline_inode(inode);
2079         if (ret)
2080                 goto out;
2081
2082         f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
2083
2084         /*
2085          * Wait for end_io to finish so that F2FS_WB_CP_DATA is counted
2086          * correctly by f2fs_is_atomic_file().
2087          */
2088         if (get_dirty_pages(inode))
2089                 f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2090                           inode->i_ino, get_dirty_pages(inode));
2091         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2092         if (ret) {
2093                 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2094                 goto out;
2095         }
2096
2097         /* Check if the inode already has a COW inode */
2098         if (fi->cow_inode == NULL) {
2099                 /* Create a COW inode for atomic write */
2100                 pinode = f2fs_iget(inode->i_sb, fi->i_pino);
2101                 if (IS_ERR(pinode)) {
2102                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2103                         ret = PTR_ERR(pinode);
2104                         goto out;
2105                 }
2106
2107                 ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
2108                 iput(pinode);
2109                 if (ret) {
2110                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2111                         goto out;
2112                 }
2113
2114                 set_inode_flag(fi->cow_inode, FI_COW_FILE);
2115                 clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
2116         } else {
2117                 /* Reuse the already created COW inode */
2118                 ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
2119                 if (ret) {
2120                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2121                         goto out;
2122                 }
2123         }
2124
2125         f2fs_write_inode(inode, NULL);
2126
2127         stat_inc_atomic_inode(inode);
2128
2129         set_inode_flag(inode, FI_ATOMIC_FILE);
2130
2131         isize = i_size_read(inode);
2132         fi->original_i_size = isize;
2133         if (truncate) {
2134                 set_inode_flag(inode, FI_ATOMIC_REPLACE);
2135                 truncate_inode_pages_final(inode->i_mapping);
2136                 f2fs_i_size_write(inode, 0);
2137                 isize = 0;
2138         }
2139         f2fs_i_size_write(fi->cow_inode, isize);
2140
2141         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2142
2143         f2fs_update_time(sbi, REQ_TIME);
2144         fi->atomic_write_task = current;
2145         stat_update_max_atomic_write(inode);
2146         fi->atomic_write_cnt = 0;
2147 out:
2148         inode_unlock(inode);
2149         mnt_drop_write_file(filp);
2150         return ret;
2151 }
2152
2153 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2154 {
2155         struct inode *inode = file_inode(filp);
2156         struct mnt_idmap *idmap = file_mnt_idmap(filp);
2157         int ret;
2158
2159         if (!inode_owner_or_capable(idmap, inode))
2160                 return -EACCES;
2161
2162         ret = mnt_want_write_file(filp);
2163         if (ret)
2164                 return ret;
2165
2166         f2fs_balance_fs(F2FS_I_SB(inode), true);
2167
2168         inode_lock(inode);
2169
2170         if (f2fs_is_atomic_file(inode)) {
2171                 ret = f2fs_commit_atomic_write(inode);
2172                 if (!ret)
2173                         ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2174
2175                 f2fs_abort_atomic_write(inode, ret);
2176         } else {
2177                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2178         }
2179
2180         inode_unlock(inode);
2181         mnt_drop_write_file(filp);
2182         return ret;
2183 }
2184
2185 static int f2fs_ioc_abort_atomic_write(struct file *filp)
2186 {
2187         struct inode *inode = file_inode(filp);
2188         struct mnt_idmap *idmap = file_mnt_idmap(filp);
2189         int ret;
2190
2191         if (!inode_owner_or_capable(idmap, inode))
2192                 return -EACCES;
2193
2194         ret = mnt_want_write_file(filp);
2195         if (ret)
2196                 return ret;
2197
2198         inode_lock(inode);
2199
2200         f2fs_abort_atomic_write(inode, true);
2201
2202         inode_unlock(inode);
2203
2204         mnt_drop_write_file(filp);
2205         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2206         return ret;
2207 }
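
/*
 * Illustrative userspace sketch of the atomic write ioctls handled
 * above: start, write, then commit; abort on failure so a cancelled
 * transaction is rolled back rather than partially applied. Error
 * handling is trimmed for brevity.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int write_atomically(int fd, const void *buf, size_t len)
{
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		return -1;
	if (pwrite(fd, buf, len, 0) != (ssize_t)len) {
		ioctl(fd, F2FS_IOC_ABORT_ATOMIC_WRITE);
		return -1;
	}
	/* commit also syncs the file, mirroring f2fs_do_sync_file() above */
	return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
}
#endif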
2208
2209 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2210 {
2211         struct inode *inode = file_inode(filp);
2212         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2213         struct super_block *sb = sbi->sb;
2214         __u32 in;
2215         int ret = 0;
2216
2217         if (!capable(CAP_SYS_ADMIN))
2218                 return -EPERM;
2219
2220         if (get_user(in, (__u32 __user *)arg))
2221                 return -EFAULT;
2222
2223         if (in != F2FS_GOING_DOWN_FULLSYNC) {
2224                 ret = mnt_want_write_file(filp);
2225                 if (ret) {
2226                         if (ret == -EROFS) {
2227                                 ret = 0;
2228                                 f2fs_stop_checkpoint(sbi, false,
2229                                                 STOP_CP_REASON_SHUTDOWN);
2230                                 trace_f2fs_shutdown(sbi, in, ret);
2231                         }
2232                         return ret;
2233                 }
2234         }
2235
2236         switch (in) {
2237         case F2FS_GOING_DOWN_FULLSYNC:
2238                 ret = bdev_freeze(sb->s_bdev);
2239                 if (ret)
2240                         goto out;
2241                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2242                 bdev_thaw(sb->s_bdev);
2243                 break;
2244         case F2FS_GOING_DOWN_METASYNC:
2245                 /* do checkpoint only */
2246                 ret = f2fs_sync_fs(sb, 1);
2247                 if (ret)
2248                         goto out;
2249                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2250                 break;
2251         case F2FS_GOING_DOWN_NOSYNC:
2252                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2253                 break;
2254         case F2FS_GOING_DOWN_METAFLUSH:
2255                 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2256                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2257                 break;
2258         case F2FS_GOING_DOWN_NEED_FSCK:
2259                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2260                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2261                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2262                 /* do checkpoint only */
2263                 ret = f2fs_sync_fs(sb, 1);
2264                 goto out;
2265         default:
2266                 ret = -EINVAL;
2267                 goto out;
2268         }
2269
2270         f2fs_stop_gc_thread(sbi);
2271         f2fs_stop_discard_thread(sbi);
2272
2273         f2fs_drop_discard_cmd(sbi);
2274         clear_opt(sbi, DISCARD);
2275
2276         f2fs_update_time(sbi, REQ_TIME);
2277 out:
2278         if (in != F2FS_GOING_DOWN_FULLSYNC)
2279                 mnt_drop_write_file(filp);
2280
2281         trace_f2fs_shutdown(sbi, in, ret);
2282
2283         return ret;
2284 }
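
/*
 * Illustrative userspace sketch: F2FS_IOC_SHUTDOWN. The __u32 argument
 * selects the policy handled in the switch above, e.g.
 * F2FS_GOING_DOWN_METASYNC (checkpoint, then stop) or
 * F2FS_GOING_DOWN_NOSYNC (stop immediately). Requires CAP_SYS_ADMIN.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int shutdown_fs(int fd)
{
	__u32 how = F2FS_GOING_DOWN_METASYNC;

	return ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
}
#endif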
2285
2286 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2287 {
2288         struct inode *inode = file_inode(filp);
2289         struct super_block *sb = inode->i_sb;
2290         struct fstrim_range range;
2291         int ret;
2292
2293         if (!capable(CAP_SYS_ADMIN))
2294                 return -EPERM;
2295
2296         if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2297                 return -EOPNOTSUPP;
2298
2299         if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2300                                 sizeof(range)))
2301                 return -EFAULT;
2302
2303         ret = mnt_want_write_file(filp);
2304         if (ret)
2305                 return ret;
2306
2307         range.minlen = max((unsigned int)range.minlen,
2308                            bdev_discard_granularity(sb->s_bdev));
2309         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2310         mnt_drop_write_file(filp);
2311         if (ret < 0)
2312                 return ret;
2313
2314         if (copy_to_user((struct fstrim_range __user *)arg, &range,
2315                                 sizeof(range)))
2316                 return -EFAULT;
2317         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2318         return 0;
2319 }
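
/*
 * Illustrative userspace sketch: FITRIM, as fstrim(8) issues it. The
 * kernel clamps range.minlen to the device discard granularity and
 * writes back the number of bytes trimmed, per the code above.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int trim_all(int fd)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* whole filesystem */
		.minlen = 0,	/* clamped to discard granularity above */
	};

	if (ioctl(fd, FITRIM, &range) < 0)
		return -1;
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}
#endif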
2320
2321 static bool uuid_is_nonzero(__u8 u[16])
2322 {
2323         int i;
2324
2325         for (i = 0; i < 16; i++)
2326                 if (u[i])
2327                         return true;
2328         return false;
2329 }
2330
2331 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2332 {
2333         struct inode *inode = file_inode(filp);
2334
2335         if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2336                 return -EOPNOTSUPP;
2337
2338         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2339
2340         return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2341 }
2342
2343 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2344 {
2345         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2346                 return -EOPNOTSUPP;
2347         return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2348 }
2349
2350 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2351 {
2352         struct inode *inode = file_inode(filp);
2353         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2354         u8 encrypt_pw_salt[16];
2355         int err;
2356
2357         if (!f2fs_sb_has_encrypt(sbi))
2358                 return -EOPNOTSUPP;
2359
2360         err = mnt_want_write_file(filp);
2361         if (err)
2362                 return err;
2363
2364         f2fs_down_write(&sbi->sb_lock);
2365
2366         if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2367                 goto got_it;
2368
2369         /* update superblock with uuid */
2370         generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2371
2372         err = f2fs_commit_super(sbi, false);
2373         if (err) {
2374                 /* undo new data */
2375                 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2376                 goto out_err;
2377         }
2378 got_it:
2379         memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
2380 out_err:
2381         f2fs_up_write(&sbi->sb_lock);
2382         mnt_drop_write_file(filp);
2383
2384         if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
2385                 err = -EFAULT;
2386
2387         return err;
2388 }
2389
2390 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2391                                              unsigned long arg)
2392 {
2393         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2394                 return -EOPNOTSUPP;
2395
2396         return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2397 }
2398
2399 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2400 {
2401         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2402                 return -EOPNOTSUPP;
2403
2404         return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2405 }
2406
2407 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2408 {
2409         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2410                 return -EOPNOTSUPP;
2411
2412         return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2413 }
2414
2415 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2416                                                     unsigned long arg)
2417 {
2418         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2419                 return -EOPNOTSUPP;
2420
2421         return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2422 }
2423
2424 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2425                                               unsigned long arg)
2426 {
2427         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2428                 return -EOPNOTSUPP;
2429
2430         return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2431 }
2432
2433 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2434 {
2435         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2436                 return -EOPNOTSUPP;
2437
2438         return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2439 }
2440
2441 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2442 {
2443         struct inode *inode = file_inode(filp);
2444         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2445         struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
2446                         .no_bg_gc = false,
2447                         .should_migrate_blocks = false,
2448                         .nr_free_secs = 0 };
2449         __u32 sync;
2450         int ret;
2451
2452         if (!capable(CAP_SYS_ADMIN))
2453                 return -EPERM;
2454
2455         if (get_user(sync, (__u32 __user *)arg))
2456                 return -EFAULT;
2457
2458         if (f2fs_readonly(sbi->sb))
2459                 return -EROFS;
2460
2461         ret = mnt_want_write_file(filp);
2462         if (ret)
2463                 return ret;
2464
2465         if (!sync) {
2466                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2467                         ret = -EBUSY;
2468                         goto out;
2469                 }
2470         } else {
2471                 f2fs_down_write(&sbi->gc_lock);
2472         }
2473
2474         gc_control.init_gc_type = sync ? FG_GC : BG_GC;
2475         gc_control.err_gc_skipped = sync;
2476         stat_inc_gc_call_count(sbi, FOREGROUND);
2477         ret = f2fs_gc(sbi, &gc_control);
2478 out:
2479         mnt_drop_write_file(filp);
2480         return ret;
2481 }
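
/*
 * Illustrative userspace sketch: F2FS_IOC_GARBAGE_COLLECT. sync == 1
 * waits for gc_lock and runs foreground GC; sync == 0 only trylocks,
 * so the ioctl fails with EBUSY if GC is already running, matching the
 * logic above.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int force_gc(int fd, int wait)
{
	__u32 sync = wait ? 1 : 0;

	return ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
}
#endif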
2482
2483 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2484 {
2485         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2486         struct f2fs_gc_control gc_control = {
2487                         .init_gc_type = range->sync ? FG_GC : BG_GC,
2488                         .no_bg_gc = false,
2489                         .should_migrate_blocks = false,
2490                         .err_gc_skipped = range->sync,
2491                         .nr_free_secs = 0 };
2492         u64 end;
2493         int ret;
2494
2495         if (!capable(CAP_SYS_ADMIN))
2496                 return -EPERM;
2497         if (f2fs_readonly(sbi->sb))
2498                 return -EROFS;
2499
2500         end = range->start + range->len;
2501         if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2502                                         end >= MAX_BLKADDR(sbi))
2503                 return -EINVAL;
2504
2505         ret = mnt_want_write_file(filp);
2506         if (ret)
2507                 return ret;
2508
2509 do_more:
2510         if (!range->sync) {
2511                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2512                         ret = -EBUSY;
2513                         goto out;
2514                 }
2515         } else {
2516                 f2fs_down_write(&sbi->gc_lock);
2517         }
2518
2519         gc_control.victim_segno = GET_SEGNO(sbi, range->start);
2520         stat_inc_gc_call_count(sbi, FOREGROUND);
2521         ret = f2fs_gc(sbi, &gc_control);
2522         if (ret) {
2523                 if (ret == -EBUSY)
2524                         ret = -EAGAIN;
2525                 goto out;
2526         }
2527         range->start += CAP_BLKS_PER_SEC(sbi);
2528         if (range->start <= end)
2529                 goto do_more;
2530 out:
2531         mnt_drop_write_file(filp);
2532         return ret;
2533 }
2534
2535 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2536 {
2537         struct f2fs_gc_range range;
2538
2539         if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2540                                                         sizeof(range)))
2541                 return -EFAULT;
2542         return __f2fs_ioc_gc_range(filp, &range);
2543 }
2544
2545 static int f2fs_ioc_write_checkpoint(struct file *filp)
2546 {
2547         struct inode *inode = file_inode(filp);
2548         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2549         int ret;
2550
2551         if (!capable(CAP_SYS_ADMIN))
2552                 return -EPERM;
2553
2554         if (f2fs_readonly(sbi->sb))
2555                 return -EROFS;
2556
2557         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2558                 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2559                 return -EINVAL;
2560         }
2561
2562         ret = mnt_want_write_file(filp);
2563         if (ret)
2564                 return ret;
2565
2566         ret = f2fs_sync_fs(sbi->sb, 1);
2567
2568         mnt_drop_write_file(filp);
2569         return ret;
2570 }
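
/*
 * Illustrative userspace sketch: F2FS_IOC_WRITE_CHECKPOINT takes no
 * argument and simply triggers f2fs_sync_fs(); it fails with EINVAL
 * while checkpointing is disabled, as checked above.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int write_checkpoint(int fd)
{
	return ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT);
}
#endif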
2571
2572 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2573                                         struct file *filp,
2574                                         struct f2fs_defragment *range)
2575 {
2576         struct inode *inode = file_inode(filp);
2577         struct f2fs_map_blocks map = { .m_next_extent = NULL,
2578                                         .m_seg_type = NO_CHECK_TYPE,
2579                                         .m_may_create = false };
2580         struct extent_info ei = {};
2581         pgoff_t pg_start, pg_end, next_pgofs;
2582         unsigned int blk_per_seg = sbi->blocks_per_seg;
2583         unsigned int total = 0, sec_num;
2584         block_t blk_end = 0;
2585         bool fragmented = false;
2586         int err;
2587
2588         pg_start = range->start >> PAGE_SHIFT;
2589         pg_end = (range->start + range->len) >> PAGE_SHIFT;
2590
2591         f2fs_balance_fs(sbi, true);
2592
2593         inode_lock(inode);
2594
2595         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
2596                 err = -EINVAL;
2597                 goto unlock_out;
2598         }
2599
2600         /* if in-place-update policy is enabled, don't waste time here */
2601         set_inode_flag(inode, FI_OPU_WRITE);
2602         if (f2fs_should_update_inplace(inode, NULL)) {
2603                 err = -EINVAL;
2604                 goto out;
2605         }
2606
2607         /* writeback all dirty pages in the range */
2608         err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2609                                                 range->start + range->len - 1);
2610         if (err)
2611                 goto out;
2612
2613         /*
2614          * Look up mapping info in the extent cache; skip defragmenting if
2615          * the physical block addresses are contiguous.
2616          */
2617         if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2618                 if (ei.fofs + ei.len >= pg_end)
2619                         goto out;
2620         }
2621
2622         map.m_lblk = pg_start;
2623         map.m_next_pgofs = &next_pgofs;
2624
2625         /*
2626          * Look up mapping info in the dnode page cache; skip defragmenting
2627          * if all physical block addresses are contiguous, even if there are
2628          * holes in the logical blocks.
2629          */
2630         while (map.m_lblk < pg_end) {
2631                 map.m_len = pg_end - map.m_lblk;
2632                 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2633                 if (err)
2634                         goto out;
2635
2636                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2637                         map.m_lblk = next_pgofs;
2638                         continue;
2639                 }
2640
2641                 if (blk_end && blk_end != map.m_pblk)
2642                         fragmented = true;
2643
2644                 /* record the total count of blocks that we're going to move */
2645                 total += map.m_len;
2646
2647                 blk_end = map.m_pblk + map.m_len;
2648
2649                 map.m_lblk += map.m_len;
2650         }
2651
2652         if (!fragmented) {
2653                 total = 0;
2654                 goto out;
2655         }
2656
2657         sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2658
2659         /*
2660          * Make sure there are enough free sections for LFS allocation; this
2661          * avoids running the defragment in SSR mode when free sections are
2662          * being allocated intensively.
2663          */
2664         if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2665                 err = -EAGAIN;
2666                 goto out;
2667         }
2668
2669         map.m_lblk = pg_start;
2670         map.m_len = pg_end - pg_start;
2671         total = 0;
2672
2673         while (map.m_lblk < pg_end) {
2674                 pgoff_t idx;
2675                 int cnt = 0;
2676
2677 do_map:
2678                 map.m_len = pg_end - map.m_lblk;
2679                 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2680                 if (err)
2681                         goto clear_out;
2682
2683                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2684                         map.m_lblk = next_pgofs;
2685                         goto check;
2686                 }
2687
2688                 set_inode_flag(inode, FI_SKIP_WRITES);
2689
2690                 idx = map.m_lblk;
2691                 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2692                         struct page *page;
2693
2694                         page = f2fs_get_lock_data_page(inode, idx, true);
2695                         if (IS_ERR(page)) {
2696                                 err = PTR_ERR(page);
2697                                 goto clear_out;
2698                         }
2699
2700                         set_page_dirty(page);
2701                         set_page_private_gcing(page);
2702                         f2fs_put_page(page, 1);
2703
2704                         idx++;
2705                         cnt++;
2706                         total++;
2707                 }
2708
2709                 map.m_lblk = idx;
2710 check:
2711                 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2712                         goto do_map;
2713
2714                 clear_inode_flag(inode, FI_SKIP_WRITES);
2715
2716                 err = filemap_fdatawrite(inode->i_mapping);
2717                 if (err)
2718                         goto out;
2719         }
2720 clear_out:
2721         clear_inode_flag(inode, FI_SKIP_WRITES);
2722 out:
2723         clear_inode_flag(inode, FI_OPU_WRITE);
2724 unlock_out:
2725         inode_unlock(inode);
2726         if (!err)
2727                 range->len = (u64)total << PAGE_SHIFT;
2728         return err;
2729 }
2730
2731 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2732 {
2733         struct inode *inode = file_inode(filp);
2734         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2735         struct f2fs_defragment range;
2736         int err;
2737
2738         if (!capable(CAP_SYS_ADMIN))
2739                 return -EPERM;
2740
2741         if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2742                 return -EINVAL;
2743
2744         if (f2fs_readonly(sbi->sb))
2745                 return -EROFS;
2746
2747         if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2748                                                         sizeof(range)))
2749                 return -EFAULT;
2750
2751         /* verify alignment of offset & size */
2752         if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2753                 return -EINVAL;
2754
2755         if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2756                                         max_file_blocks(inode)))
2757                 return -EINVAL;
2758
2759         err = mnt_want_write_file(filp);
2760         if (err)
2761                 return err;
2762
2763         err = f2fs_defragment_range(sbi, filp, &range);
2764         mnt_drop_write_file(filp);
2765
2766         f2fs_update_time(sbi, REQ_TIME);
2767         if (err < 0)
2768                 return err;
2769
2770         if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2771                                                         sizeof(range)))
2772                 return -EFAULT;
2773
2774         return 0;
2775 }
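
/*
 * Illustrative userspace sketch: F2FS_IOC_DEFRAGMENT. start and len
 * must be block aligned; on success the kernel rewrites range.len with
 * the number of bytes it actually marked for migration, and it returns
 * EAGAIN when free sections are too scarce, per f2fs_defragment_range()
 * above.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int defrag(int fd, __u64 start, __u64 len)
{
	struct f2fs_defragment range = { .start = start, .len = len };

	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) < 0)
		return -1;
	printf("queued %llu bytes for migration\n",
	       (unsigned long long)range.len);
	return 0;
}
#endif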
2776
2777 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2778                         struct file *file_out, loff_t pos_out, size_t len)
2779 {
2780         struct inode *src = file_inode(file_in);
2781         struct inode *dst = file_inode(file_out);
2782         struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2783         size_t olen = len, dst_max_i_size = 0;
2784         size_t dst_osize;
2785         int ret;
2786
2787         if (file_in->f_path.mnt != file_out->f_path.mnt ||
2788                                 src->i_sb != dst->i_sb)
2789                 return -EXDEV;
2790
2791         if (unlikely(f2fs_readonly(src->i_sb)))
2792                 return -EROFS;
2793
2794         if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2795                 return -EINVAL;
2796
2797         if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2798                 return -EOPNOTSUPP;
2799
2800         if (pos_out < 0 || pos_in < 0)
2801                 return -EINVAL;
2802
2803         if (src == dst) {
2804                 if (pos_in == pos_out)
2805                         return 0;
2806                 if (pos_out > pos_in && pos_out < pos_in + len)
2807                         return -EINVAL;
2808         }
2809
2810         inode_lock(src);
2811         if (src != dst) {
2812                 ret = -EBUSY;
2813                 if (!inode_trylock(dst))
2814                         goto out;
2815         }
2816
2817         if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2818                 ret = -EOPNOTSUPP;
2819                 goto out_unlock;
2820         }
2821
2822         ret = -EINVAL;
2823         if (pos_in + len > src->i_size || pos_in + len < pos_in)
2824                 goto out_unlock;
2825         if (len == 0)
2826                 olen = len = src->i_size - pos_in;
2827         if (pos_in + len == src->i_size)
2828                 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2829         if (len == 0) {
2830                 ret = 0;
2831                 goto out_unlock;
2832         }
2833
2834         dst_osize = dst->i_size;
2835         if (pos_out + olen > dst->i_size)
2836                 dst_max_i_size = pos_out + olen;
2837
2838         /* verify the end result is block aligned */
2839         if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2840                         !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2841                         !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2842                 goto out_unlock;
2843
2844         ret = f2fs_convert_inline_inode(src);
2845         if (ret)
2846                 goto out_unlock;
2847
2848         ret = f2fs_convert_inline_inode(dst);
2849         if (ret)
2850                 goto out_unlock;
2851
2852         /* write out all dirty pages from offset */
2853         ret = filemap_write_and_wait_range(src->i_mapping,
2854                                         pos_in, pos_in + len);
2855         if (ret)
2856                 goto out_unlock;
2857
2858         ret = filemap_write_and_wait_range(dst->i_mapping,
2859                                         pos_out, pos_out + len);
2860         if (ret)
2861                 goto out_unlock;
2862
2863         f2fs_balance_fs(sbi, true);
2864
2865         f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2866         if (src != dst) {
2867                 ret = -EBUSY;
2868                 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2869                         goto out_src;
2870         }
2871
2872         f2fs_lock_op(sbi);
2873         ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2874                                 pos_out >> F2FS_BLKSIZE_BITS,
2875                                 len >> F2FS_BLKSIZE_BITS, false);
2876
2877         if (!ret) {
2878                 if (dst_max_i_size)
2879                         f2fs_i_size_write(dst, dst_max_i_size);
2880                 else if (dst_osize != dst->i_size)
2881                         f2fs_i_size_write(dst, dst_osize);
2882         }
2883         f2fs_unlock_op(sbi);
2884
2885         if (src != dst)
2886                 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2887 out_src:
2888         f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2889         if (ret)
2890                 goto out_unlock;
2891
2892         inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
2893         f2fs_mark_inode_dirty_sync(src, false);
2894         if (src != dst) {
2895                 inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
2896                 f2fs_mark_inode_dirty_sync(dst, false);
2897         }
2898         f2fs_update_time(sbi, REQ_TIME);
2899
2900 out_unlock:
2901         if (src != dst)
2902                 inode_unlock(dst);
2903 out:
2904         inode_unlock(src);
2905         return ret;
2906 }
2907
2908 static int __f2fs_ioc_move_range(struct file *filp,
2909                                 struct f2fs_move_range *range)
2910 {
2911         struct fd dst;
2912         int err;
2913
2914         if (!(filp->f_mode & FMODE_READ) ||
2915                         !(filp->f_mode & FMODE_WRITE))
2916                 return -EBADF;
2917
2918         dst = fdget(range->dst_fd);
2919         if (!dst.file)
2920                 return -EBADF;
2921
2922         if (!(dst.file->f_mode & FMODE_WRITE)) {
2923                 err = -EBADF;
2924                 goto err_out;
2925         }
2926
2927         err = mnt_want_write_file(filp);
2928         if (err)
2929                 goto err_out;
2930
2931         err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2932                                         range->pos_out, range->len);
2933
2934         mnt_drop_write_file(filp);
2935 err_out:
2936         fdput(dst);
2937         return err;
2938 }
2939
2940 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2941 {
2942         struct f2fs_move_range range;
2943
2944         if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2945                                                         sizeof(range)))
2946                 return -EFAULT;
2947         return __f2fs_ioc_move_range(filp, &range);
2948 }
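
/*
 * Illustrative userspace sketch: F2FS_IOC_MOVE_RANGE, the entry point
 * for the block exchange above. The ioctl is issued on the source fd;
 * dst_fd names the destination, and the offsets and length must be
 * block aligned. Both files must be regular and unencrypted.
 */
#if 0	/* sample userspace code, excluded from the kernel build */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int move_range(int src_fd, int dst_fd,
		      __u64 pos_in, __u64 pos_out, __u64 len)
{
	struct f2fs_move_range range = {
		.dst_fd = dst_fd,
		.pos_in = pos_in,
		.pos_out = pos_out,
		.len = len,
	};

	return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
}
#endif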
2949
2950 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2951 {
2952         struct inode *inode = file_inode(filp);
2953         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2954         struct sit_info *sm = SIT_I(sbi);
2955         unsigned int start_segno = 0, end_segno = 0;
2956         unsigned int dev_start_segno = 0, dev_end_segno = 0;
2957         struct f2fs_flush_device range;
2958         struct f2fs_gc_control gc_control = {
2959                         .init_gc_type = FG_GC,
2960                         .should_migrate_blocks = true,
2961                         .err_gc_skipped = true,
2962                         .nr_free_secs = 0 };
2963         int ret;
2964
2965         if (!capable(CAP_SYS_ADMIN))
2966                 return -EPERM;
2967
2968         if (f2fs_readonly(sbi->sb))
2969                 return -EROFS;
2970
2971         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2972                 return -EINVAL;
2973
2974         if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2975                                                         sizeof(range)))
2976                 return -EFAULT;
2977
2978         if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2979                         __is_large_section(sbi)) {
2980                 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2981                           range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2982                 return -EINVAL;
2983         }
2984
2985         ret = mnt_want_write_file(filp);
2986         if (ret)
2987                 return ret;
2988
2989         if (range.dev_num != 0)
2990                 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2991         dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2992
2993         start_segno = sm->last_victim[FLUSH_DEVICE];
2994         if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2995                 start_segno = dev_start_segno;
2996         end_segno = min(start_segno + range.segments, dev_end_segno);
2997
2998         while (start_segno < end_segno) {
2999                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3000                         ret = -EBUSY;
3001                         goto out;
3002                 }
3003                 sm->last_victim[GC_CB] = end_segno + 1;
3004                 sm->last_victim[GC_GREEDY] = end_segno + 1;
3005                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3006
3007                 gc_control.victim_segno = start_segno;
3008                 stat_inc_gc_call_count(sbi, FOREGROUND);
3009                 ret = f2fs_gc(sbi, &gc_control);
3010                 if (ret == -EAGAIN)
3011                         ret = 0;
3012                 else if (ret < 0)
3013                         break;
3014                 start_segno++;
3015         }
3016 out:
3017         mnt_drop_write_file(filp);
3018         return ret;
3019 }
3020
3021 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3022 {
3023         struct inode *inode = file_inode(filp);
3024         u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3025
3026         /* Always advertise atomic write support; SQLite on Android probes it. */
3027         sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3028
3029         return put_user(sb_feature, (u32 __user *)arg);
3030 }
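
/*
 * Usage sketch (userspace, not part of this file): read the feature
 * bitmap; F2FS_FEATURE_ATOMIC_WRITE is always reported, as noted above.
 *
 *	__u32 feat;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feat) == 0 &&
 *	    (feat & F2FS_FEATURE_ATOMIC_WRITE))
 *		; /* atomic writes are available */
 */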
3031
3032 #ifdef CONFIG_QUOTA
3033 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3034 {
3035         struct dquot *transfer_to[MAXQUOTAS] = {};
3036         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3037         struct super_block *sb = sbi->sb;
3038         int err;
3039
3040         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3041         if (IS_ERR(transfer_to[PRJQUOTA]))
3042                 return PTR_ERR(transfer_to[PRJQUOTA]);
3043
3044         err = __dquot_transfer(inode, transfer_to);
3045         if (err)
3046                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3047         dqput(transfer_to[PRJQUOTA]);
3048         return err;
3049 }
3050
3051 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3052 {
3053         struct f2fs_inode_info *fi = F2FS_I(inode);
3054         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3055         struct f2fs_inode *ri = NULL;
3056         kprojid_t kprojid;
3057         int err;
3058
3059         if (!f2fs_sb_has_project_quota(sbi)) {
3060                 if (projid != F2FS_DEF_PROJID)
3061                         return -EOPNOTSUPP;
3062                 else
3063                         return 0;
3064         }
3065
3066         if (!f2fs_has_extra_attr(inode))
3067                 return -EOPNOTSUPP;
3068
3069         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3070
3071         if (projid_eq(kprojid, fi->i_projid))
3072                 return 0;
3073
3074         err = -EPERM;
3075         /* Is it a quota file? Do not allow the user to mess with it */
3076         if (IS_NOQUOTA(inode))
3077                 return err;
3078
3079         if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3080                 return -EOVERFLOW;
3081
3082         err = f2fs_dquot_initialize(inode);
3083         if (err)
3084                 return err;
3085
3086         f2fs_lock_op(sbi);
3087         err = f2fs_transfer_project_quota(inode, kprojid);
3088         if (err)
3089                 goto out_unlock;
3090
3091         fi->i_projid = kprojid;
3092         inode_set_ctime_current(inode);
3093         f2fs_mark_inode_dirty_sync(inode, true);
3094 out_unlock:
3095         f2fs_unlock_op(sbi);
3096         return err;
3097 }
3098 #else
3099 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3100 {
3101         return 0;
3102 }
3103
3104 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3105 {
3106         if (projid != F2FS_DEF_PROJID)
3107                 return -EOPNOTSUPP;
3108         return 0;
3109 }
3110 #endif
3111
3112 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3113 {
3114         struct inode *inode = d_inode(dentry);
3115         struct f2fs_inode_info *fi = F2FS_I(inode);
3116         u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3117
3118         if (IS_ENCRYPTED(inode))
3119                 fsflags |= FS_ENCRYPT_FL;
3120         if (IS_VERITY(inode))
3121                 fsflags |= FS_VERITY_FL;
3122         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3123                 fsflags |= FS_INLINE_DATA_FL;
3124         if (is_inode_flag_set(inode, FI_PIN_FILE))
3125                 fsflags |= FS_NOCOW_FL;
3126
3127         fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3128
3129         if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3130                 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3131
3132         return 0;
3133 }
3134
3135 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3136                       struct dentry *dentry, struct fileattr *fa)
3137 {
3138         struct inode *inode = d_inode(dentry);
3139         u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3140         u32 iflags;
3141         int err;
3142
3143         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3144                 return -EIO;
3145         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3146                 return -ENOSPC;
3147         if (fsflags & ~F2FS_GETTABLE_FS_FL)
3148                 return -EOPNOTSUPP;
3149         fsflags &= F2FS_SETTABLE_FS_FL;
3150         if (!fa->flags_valid)
3151                 mask &= FS_COMMON_FL;
3152
3153         iflags = f2fs_fsflags_to_iflags(fsflags);
3154         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3155                 return -EOPNOTSUPP;
3156
3157         err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3158         if (!err)
3159                 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3160
3161         return err;
3162 }
3163
3164 int f2fs_pin_file_control(struct inode *inode, bool inc)
3165 {
3166         struct f2fs_inode_info *fi = F2FS_I(inode);
3167         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3168
3169         /* Use i_gc_failures of a regular file as a risk signal. */
3170         if (inc)
3171                 f2fs_i_gc_failures_write(inode,
3172                                 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3173
3174         if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3175                 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3176                           __func__, inode->i_ino,
3177                           fi->i_gc_failures[GC_FAILURE_PIN]);
3178                 clear_inode_flag(inode, FI_PIN_FILE);
3179                 return -EAGAIN;
3180         }
3181         return 0;
3182 }
3183
3184 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3185 {
3186         struct inode *inode = file_inode(filp);
3187         __u32 pin;
3188         int ret = 0;
3189
3190         if (get_user(pin, (__u32 __user *)arg))
3191                 return -EFAULT;
3192
3193         if (!S_ISREG(inode->i_mode))
3194                 return -EINVAL;
3195
3196         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3197                 return -EROFS;
3198
3199         ret = mnt_want_write_file(filp);
3200         if (ret)
3201                 return ret;
3202
3203         inode_lock(inode);
3204
3205         if (!pin) {
3206                 clear_inode_flag(inode, FI_PIN_FILE);
3207                 f2fs_i_gc_failures_write(inode, 0);
3208                 goto done;
3209         }
3210
3211         if (f2fs_should_update_outplace(inode, NULL)) {
3212                 ret = -EINVAL;
3213                 goto out;
3214         }
3215
3216         if (f2fs_pin_file_control(inode, false)) {
3217                 ret = -EAGAIN;
3218                 goto out;
3219         }
3220
3221         ret = f2fs_convert_inline_inode(inode);
3222         if (ret)
3223                 goto out;
3224
3225         if (!f2fs_disable_compressed_file(inode)) {
3226                 ret = -EOPNOTSUPP;
3227                 goto out;
3228         }
3229
3230         set_inode_flag(inode, FI_PIN_FILE);
3231         ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3232 done:
3233         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3234 out:
3235         inode_unlock(inode);
3236         mnt_drop_write_file(filp);
3237         return ret;
3238 }
3239
3240 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3241 {
3242         struct inode *inode = file_inode(filp);
3243         __u32 pin = 0;
3244
3245         if (is_inode_flag_set(inode, FI_PIN_FILE))
3246                 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3247         return put_user(pin, (u32 __user *)arg);
3248 }
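
/*
 * Usage sketch (userspace, not part of this file): pin a regular file so
 * GC does not migrate its blocks, then query the pin state. Note that
 * F2FS_IOC_GET_PIN_FILE returns the GC-failure count of a pinned file,
 * not a boolean.
 *
 *	__u32 pin = 1;
 *
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */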
3249
3250 int f2fs_precache_extents(struct inode *inode)
3251 {
3252         struct f2fs_inode_info *fi = F2FS_I(inode);
3253         struct f2fs_map_blocks map;
3254         pgoff_t m_next_extent;
3255         loff_t end;
3256         int err;
3257
3258         if (is_inode_flag_set(inode, FI_NO_EXTENT))
3259                 return -EOPNOTSUPP;
3260
3261         map.m_lblk = 0;
3262         map.m_pblk = 0;
3263         map.m_next_pgofs = NULL;
3264         map.m_next_extent = &m_next_extent;
3265         map.m_seg_type = NO_CHECK_TYPE;
3266         map.m_may_create = false;
3267         end = F2FS_BLK_ALIGN(i_size_read(inode));
3268
3269         while (map.m_lblk < end) {
3270                 map.m_len = end - map.m_lblk;
3271
3272                 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3273                 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
3274                 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3275                 if (err || !map.m_len)
3276                         return err;
3277
3278                 map.m_lblk = m_next_extent;
3279         }
3280
3281         return 0;
3282 }
3283
3284 static int f2fs_ioc_precache_extents(struct file *filp)
3285 {
3286         return f2fs_precache_extents(file_inode(filp));
3287 }
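
/*
 * Usage sketch (userspace, not part of this file): populate the extent
 * cache for the whole file up front; the ioctl ignores its argument.
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS);
 */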
3288
3289 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3290 {
3291         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3292         __u64 block_count;
3293
3294         if (!capable(CAP_SYS_ADMIN))
3295                 return -EPERM;
3296
3297         if (f2fs_readonly(sbi->sb))
3298                 return -EROFS;
3299
3300         if (copy_from_user(&block_count, (void __user *)arg,
3301                            sizeof(block_count)))
3302                 return -EFAULT;
3303
3304         return f2fs_resize_fs(filp, block_count);
3305 }
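
/*
 * Usage sketch (userspace, not part of this file; needs CAP_SYS_ADMIN):
 * resize the filesystem to a hypothetical new block count.
 *
 *	__u64 block_count = 4194304;	/* example value, in fs blocks */
 *
 *	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count) < 0)
 *		perror("F2FS_IOC_RESIZE_FS");
 */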
3306
3307 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3308 {
3309         struct inode *inode = file_inode(filp);
3310
3311         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3312
3313         if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3314                 f2fs_warn(F2FS_I_SB(inode),
3315                           "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3316                           inode->i_ino);
3317                 return -EOPNOTSUPP;
3318         }
3319
3320         return fsverity_ioctl_enable(filp, (const void __user *)arg);
3321 }
3322
3323 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3324 {
3325         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3326                 return -EOPNOTSUPP;
3327
3328         return fsverity_ioctl_measure(filp, (void __user *)arg);
3329 }
3330
3331 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3332 {
3333         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3334                 return -EOPNOTSUPP;
3335
3336         return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3337 }
3338
3339 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3340 {
3341         struct inode *inode = file_inode(filp);
3342         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3343         char *vbuf;
3344         int count;
3345         int err = 0;
3346
3347         vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3348         if (!vbuf)
3349                 return -ENOMEM;
3350
3351         f2fs_down_read(&sbi->sb_lock);
3352         count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3353                         ARRAY_SIZE(sbi->raw_super->volume_name),
3354                         UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3355         f2fs_up_read(&sbi->sb_lock);
3356
3357         if (copy_to_user((char __user *)arg, vbuf,
3358                                 min(FSLABEL_MAX, count)))
3359                 err = -EFAULT;
3360
3361         kfree(vbuf);
3362         return err;
3363 }
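
/*
 * Usage sketch (userspace, not part of this file): fetch the volume label
 * that the handler above converts from UTF-16 under sb_lock. The buffer is
 * NUL-terminated defensively, since a label of exactly FSLABEL_MAX bytes
 * would arrive unterminated.
 *
 *	char label[FSLABEL_MAX + 1];
 *
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0) {
 *		label[FSLABEL_MAX] = '\0';
 *		printf("%s\n", label);
 *	}
 */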
3364
3365 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3366 {
3367         struct inode *inode = file_inode(filp);
3368         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3369         char *vbuf;
3370         int err = 0;
3371
3372         if (!capable(CAP_SYS_ADMIN))
3373                 return -EPERM;
3374
3375         vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3376         if (IS_ERR(vbuf))
3377                 return PTR_ERR(vbuf);
3378
3379         err = mnt_want_write_file(filp);
3380         if (err)
3381                 goto out;
3382
3383         f2fs_down_write(&sbi->sb_lock);
3384
3385         memset(sbi->raw_super->volume_name, 0,
3386                         sizeof(sbi->raw_super->volume_name));
3387         utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3388                         sbi->raw_super->volume_name,
3389                         ARRAY_SIZE(sbi->raw_super->volume_name));
3390
3391         err = f2fs_commit_super(sbi, false);
3392
3393         f2fs_up_write(&sbi->sb_lock);
3394
3395         mnt_drop_write_file(filp);
3396 out:
3397         kfree(vbuf);
3398         return err;
3399 }
3400
3401 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
3402 {
3403         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3404                 return -EOPNOTSUPP;
3405
3406         if (!f2fs_compressed_file(inode))
3407                 return -EINVAL;
3408
3409         *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3410
3411         return 0;
3412 }
3413
3414 static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
3415 {
3416         struct inode *inode = file_inode(filp);
3417         __u64 blocks;
3418         int ret;
3419
3420         ret = f2fs_get_compress_blocks(inode, &blocks);
3421         if (ret < 0)
3422                 return ret;
3423
3424         return put_user(blocks, (u64 __user *)arg);
3425 }
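
/*
 * Usage sketch (userspace, not part of this file): read i_compr_blocks,
 * the number of blocks this compressed file accounts to compression.
 *
 *	__u64 blocks;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) == 0)
 *		printf("%llu\n", (unsigned long long)blocks);
 */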
3426
3427 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3428 {
3429         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3430         unsigned int released_blocks = 0;
3431         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3432         block_t blkaddr;
3433         int i;
3434
3435         for (i = 0; i < count; i++) {
3436                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3437                                                 dn->ofs_in_node + i);
3438
3439                 if (!__is_valid_data_blkaddr(blkaddr))
3440                         continue;
3441                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3442                                         DATA_GENERIC_ENHANCE))) {
3443                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3444                         return -EFSCORRUPTED;
3445                 }
3446         }
3447
3448         while (count) {
3449                 int compr_blocks = 0;
3450
3451                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3452                         blkaddr = f2fs_data_blkaddr(dn);
3453
3454                         if (i == 0) {
3455                                 if (blkaddr == COMPRESS_ADDR)
3456                                         continue;
3457                                 dn->ofs_in_node += cluster_size;
3458                                 goto next;
3459                         }
3460
3461                         if (__is_valid_data_blkaddr(blkaddr))
3462                                 compr_blocks++;
3463
3464                         if (blkaddr != NEW_ADDR)
3465                                 continue;
3466
3467                         f2fs_set_data_blkaddr(dn, NULL_ADDR);
3468                 }
3469
3470                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3471                 dec_valid_block_count(sbi, dn->inode,
3472                                         cluster_size - compr_blocks);
3473
3474                 released_blocks += cluster_size - compr_blocks;
3475 next:
3476                 count -= cluster_size;
3477         }
3478
3479         return released_blocks;
3480 }
3481
3482 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3483 {
3484         struct inode *inode = file_inode(filp);
3485         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3486         pgoff_t page_idx = 0, last_idx;
3487         unsigned int released_blocks = 0;
3488         int ret;
3489         int writecount;
3490
3491         if (!f2fs_sb_has_compression(sbi))
3492                 return -EOPNOTSUPP;
3493
3494         if (!f2fs_compressed_file(inode))
3495                 return -EINVAL;
3496
3497         if (f2fs_readonly(sbi->sb))
3498                 return -EROFS;
3499
3500         ret = mnt_want_write_file(filp);
3501         if (ret)
3502                 return ret;
3503
3504         f2fs_balance_fs(sbi, true);
3505
3506         inode_lock(inode);
3507
3508         writecount = atomic_read(&inode->i_writecount);
3509         if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3510                         (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3511                 ret = -EBUSY;
3512                 goto out;
3513         }
3514
3515         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3516                 ret = -EINVAL;
3517                 goto out;
3518         }
3519
3520         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3521         if (ret)
3522                 goto out;
3523
3524         if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3525                 ret = -EPERM;
3526                 goto out;
3527         }
3528
3529         set_inode_flag(inode, FI_COMPRESS_RELEASED);
3530         inode_set_ctime_current(inode);
3531         f2fs_mark_inode_dirty_sync(inode, true);
3532
3533         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3534         filemap_invalidate_lock(inode->i_mapping);
3535
3536         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3537
3538         while (page_idx < last_idx) {
3539                 struct dnode_of_data dn;
3540                 pgoff_t end_offset, count;
3541
3542                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3543                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3544                 if (ret) {
3545                         if (ret == -ENOENT) {
3546                                 page_idx = f2fs_get_next_page_offset(&dn,
3547                                                                 page_idx);
3548                                 ret = 0;
3549                                 continue;
3550                         }
3551                         break;
3552                 }
3553
3554                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3555                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3556                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3557
3558                 ret = release_compress_blocks(&dn, count);
3559
3560                 f2fs_put_dnode(&dn);
3561
3562                 if (ret < 0)
3563                         break;
3564
3565                 page_idx += count;
3566                 released_blocks += ret;
3567         }
3568
3569         filemap_invalidate_unlock(inode->i_mapping);
3570         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3571 out:
3572         inode_unlock(inode);
3573
3574         mnt_drop_write_file(filp);
3575
3576         if (ret >= 0) {
3577                 ret = put_user(released_blocks, (u64 __user *)arg);
3578         } else if (released_blocks &&
3579                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3580                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3581                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3582                         "iblocks=%llu, released=%u, compr_blocks=%u, "
3583                         "run fsck to fix.",
3584                         __func__, inode->i_ino, inode->i_blocks,
3585                         released_blocks,
3586                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3587         }
3588
3589         return ret;
3590 }
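
/*
 * Note: F2FS_IOC_RELEASE_COMPRESS_BLOCKS above and
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS below form a pair. Releasing returns
 * the space saved by compression to the free pool and sets
 * FI_COMPRESS_RELEASED, under which writes are rejected (see
 * f2fs_write_checks()); reserving re-charges that space and clears the
 * flag, making the file writable again.
 */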
3591
3592 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
3593                 unsigned int *reserved_blocks)
3594 {
3595         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3596         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3597         block_t blkaddr;
3598         int i;
3599
3600         for (i = 0; i < count; i++) {
3601                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3602                                                 dn->ofs_in_node + i);
3603
3604                 if (!__is_valid_data_blkaddr(blkaddr))
3605                         continue;
3606                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3607                                         DATA_GENERIC_ENHANCE))) {
3608                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3609                         return -EFSCORRUPTED;
3610                 }
3611         }
3612
3613         while (count) {
3614                 int compr_blocks = 0;
3615                 blkcnt_t reserved;
3616                 int ret;
3617
3618                 for (i = 0; i < cluster_size; i++) {
3619                         blkaddr = data_blkaddr(dn->inode, dn->node_page,
3620                                                 dn->ofs_in_node + i);
3621
3622                         if (i == 0) {
3623                                 if (blkaddr != COMPRESS_ADDR) {
3624                                         dn->ofs_in_node += cluster_size;
3625                                         goto next;
3626                                 }
3627                                 continue;
3628                         }
3629
3630                         /*
3631                          * The compressed cluster may not have been
3632                          * released because release_compress_blocks()
3633                          * failed partway, so NEW_ADDR is a possible case.
3634                          */
3635                         if (blkaddr == NEW_ADDR ||
3636                                 __is_valid_data_blkaddr(blkaddr)) {
3637                                 compr_blocks++;
3638                                 continue;
3639                         }
3640                 }
3641
3642                 reserved = cluster_size - compr_blocks;
3643
3644                 /* for the case that all blocks in the cluster were already reserved */
3645                 if (reserved == 1)
3646                         goto next;
3647
3648                 ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
3649                 if (unlikely(ret))
3650                         return ret;
3651
3652                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3653                         if (f2fs_data_blkaddr(dn) == NULL_ADDR)
3654                                 f2fs_set_data_blkaddr(dn, NEW_ADDR);
3655                 }
3656
3657                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3658
3659                 *reserved_blocks += reserved;
3660 next:
3661                 count -= cluster_size;
3662         }
3663
3664         return 0;
3665 }
3666
3667 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3668 {
3669         struct inode *inode = file_inode(filp);
3670         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3671         pgoff_t page_idx = 0, last_idx;
3672         unsigned int reserved_blocks = 0;
3673         int ret;
3674
3675         if (!f2fs_sb_has_compression(sbi))
3676                 return -EOPNOTSUPP;
3677
3678         if (!f2fs_compressed_file(inode))
3679                 return -EINVAL;
3680
3681         if (f2fs_readonly(sbi->sb))
3682                 return -EROFS;
3683
3684         ret = mnt_want_write_file(filp);
3685         if (ret)
3686                 return ret;
3687
3688         f2fs_balance_fs(sbi, true);
3689
3690         inode_lock(inode);
3691
3692         if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3693                 ret = -EINVAL;
3694                 goto unlock_inode;
3695         }
3696
3697         if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3698                 goto unlock_inode;
3699
3700         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3701         filemap_invalidate_lock(inode->i_mapping);
3702
3703         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3704
3705         while (page_idx < last_idx) {
3706                 struct dnode_of_data dn;
3707                 pgoff_t end_offset, count;
3708
3709                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3710                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3711                 if (ret) {
3712                         if (ret == -ENOENT) {
3713                                 page_idx = f2fs_get_next_page_offset(&dn,
3714                                                                 page_idx);
3715                                 ret = 0;
3716                                 continue;
3717                         }
3718                         break;
3719                 }
3720
3721                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3722                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3723                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3724
3725                 ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
3726
3727                 f2fs_put_dnode(&dn);
3728
3729                 if (ret < 0)
3730                         break;
3731
3732                 page_idx += count;
3733         }
3734
3735         filemap_invalidate_unlock(inode->i_mapping);
3736         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3737
3738         if (!ret) {
3739                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3740                 inode_set_ctime_current(inode);
3741                 f2fs_mark_inode_dirty_sync(inode, true);
3742         }
3743 unlock_inode:
3744         inode_unlock(inode);
3745         mnt_drop_write_file(filp);
3746
3747         if (!ret) {
3748                 ret = put_user(reserved_blocks, (u64 __user *)arg);
3749         } else if (reserved_blocks &&
3750                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3751                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3752                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3753                         "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3754                         "run fsck to fix.",
3755                         __func__, inode->i_ino, inode->i_blocks,
3756                         reserved_blocks,
3757                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3758         }
3759
3760         return ret;
3761 }
3762
3763 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3764                 pgoff_t off, block_t block, block_t len, u32 flags)
3765 {
3766         sector_t sector = SECTOR_FROM_BLOCK(block);
3767         sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3768         int ret = 0;
3769
3770         if (flags & F2FS_TRIM_FILE_DISCARD) {
3771                 if (bdev_max_secure_erase_sectors(bdev))
3772                         ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3773                                         GFP_NOFS);
3774                 else
3775                         ret = blkdev_issue_discard(bdev, sector, nr_sects,
3776                                         GFP_NOFS);
3777         }
3778
3779         if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3780                 if (IS_ENCRYPTED(inode))
3781                         ret = fscrypt_zeroout_range(inode, off, block, len);
3782                 else
3783                         ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3784                                         GFP_NOFS, 0);
3785         }
3786
3787         return ret;
3788 }
3789
3790 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3791 {
3792         struct inode *inode = file_inode(filp);
3793         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3794         struct address_space *mapping = inode->i_mapping;
3795         struct block_device *prev_bdev = NULL;
3796         struct f2fs_sectrim_range range;
3797         pgoff_t index, pg_end, prev_index = 0;
3798         block_t prev_block = 0, len = 0;
3799         loff_t end_addr;
3800         bool to_end = false;
3801         int ret = 0;
3802
3803         if (!(filp->f_mode & FMODE_WRITE))
3804                 return -EBADF;
3805
3806         if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3807                                 sizeof(range)))
3808                 return -EFAULT;
3809
3810         if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3811                         !S_ISREG(inode->i_mode))
3812                 return -EINVAL;
3813
3814         if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3815                         !f2fs_hw_support_discard(sbi)) ||
3816                         ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3817                          IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3818                 return -EOPNOTSUPP;
3819
3820         file_start_write(filp);
3821         inode_lock(inode);
3822
3823         if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3824                         range.start >= inode->i_size) {
3825                 ret = -EINVAL;
3826                 goto err;
3827         }
3828
3829         if (range.len == 0)
3830                 goto err;
3831
3832         if (inode->i_size - range.start > range.len) {
3833                 end_addr = range.start + range.len;
3834         } else {
3835                 end_addr = range.len == (u64)-1 ?
3836                         sbi->sb->s_maxbytes : inode->i_size;
3837                 to_end = true;
3838         }
3839
3840         if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3841                         (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3842                 ret = -EINVAL;
3843                 goto err;
3844         }
3845
3846         index = F2FS_BYTES_TO_BLK(range.start);
3847         pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3848
3849         ret = f2fs_convert_inline_inode(inode);
3850         if (ret)
3851                 goto err;
3852
3853         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3854         filemap_invalidate_lock(mapping);
3855
3856         ret = filemap_write_and_wait_range(mapping, range.start,
3857                         to_end ? LLONG_MAX : end_addr - 1);
3858         if (ret)
3859                 goto out;
3860
3861         truncate_inode_pages_range(mapping, range.start,
3862                         to_end ? -1 : end_addr - 1);
3863
3864         while (index < pg_end) {
3865                 struct dnode_of_data dn;
3866                 pgoff_t end_offset, count;
3867                 int i;
3868
3869                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3870                 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3871                 if (ret) {
3872                         if (ret == -ENOENT) {
3873                                 index = f2fs_get_next_page_offset(&dn, index);
3874                                 continue;
3875                         }
3876                         goto out;
3877                 }
3878
3879                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3880                 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3881                 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3882                         struct block_device *cur_bdev;
3883                         block_t blkaddr = f2fs_data_blkaddr(&dn);
3884
3885                         if (!__is_valid_data_blkaddr(blkaddr))
3886                                 continue;
3887
3888                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3889                                                 DATA_GENERIC_ENHANCE)) {
3890                                 ret = -EFSCORRUPTED;
3891                                 f2fs_put_dnode(&dn);
3892                                 f2fs_handle_error(sbi,
3893                                                 ERROR_INVALID_BLKADDR);
3894                                 goto out;
3895                         }
3896
3897                         cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3898                         if (f2fs_is_multi_device(sbi)) {
3899                                 int di = f2fs_target_device_index(sbi, blkaddr);
3900
3901                                 blkaddr -= FDEV(di).start_blk;
3902                         }
3903
3904                         if (len) {
3905                                 if (prev_bdev == cur_bdev &&
3906                                                 index == prev_index + len &&
3907                                                 blkaddr == prev_block + len) {
3908                                         len++;
3909                                 } else {
3910                                         ret = f2fs_secure_erase(prev_bdev,
3911                                                 inode, prev_index, prev_block,
3912                                                 len, range.flags);
3913                                         if (ret) {
3914                                                 f2fs_put_dnode(&dn);
3915                                                 goto out;
3916                                         }
3917
3918                                         len = 0;
3919                                 }
3920                         }
3921
3922                         if (!len) {
3923                                 prev_bdev = cur_bdev;
3924                                 prev_index = index;
3925                                 prev_block = blkaddr;
3926                                 len = 1;
3927                         }
3928                 }
3929
3930                 f2fs_put_dnode(&dn);
3931
3932                 if (fatal_signal_pending(current)) {
3933                         ret = -EINTR;
3934                         goto out;
3935                 }
3936                 cond_resched();
3937         }
3938
3939         if (len)
3940                 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3941                                 prev_block, len, range.flags);
3942 out:
3943         filemap_invalidate_unlock(mapping);
3944         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3945 err:
3946         inode_unlock(inode);
3947         file_end_write(filp);
3948
3949         return ret;
3950 }
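
/*
 * Usage sketch (userspace, not part of this file): discard and then zero
 * the first megabyte of a file. start/len must be fs-block aligned unless
 * len reaches EOF; len == (u64)-1 means "to the end of the file".
 *
 *	struct f2fs_sectrim_range range = {
 *		.start = 0,
 *		.len   = 1024 * 1024,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range);
 */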
3951
3952 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3953 {
3954         struct inode *inode = file_inode(filp);
3955         struct f2fs_comp_option option;
3956
3957         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3958                 return -EOPNOTSUPP;
3959
3960         inode_lock_shared(inode);
3961
3962         if (!f2fs_compressed_file(inode)) {
3963                 inode_unlock_shared(inode);
3964                 return -ENODATA;
3965         }
3966
3967         option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3968         option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3969
3970         inode_unlock_shared(inode);
3971
3972         if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3973                                 sizeof(option)))
3974                 return -EFAULT;
3975
3976         return 0;
3977 }
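
/*
 * Usage sketch (userspace, not part of this file): inspect the per-file
 * compression settings.
 *
 *	struct f2fs_comp_option opt;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION, &opt) == 0)
 *		printf("algorithm=%u cluster=%u blocks\n",
 *		       opt.algorithm, 1u << opt.log_cluster_size);
 */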
3978
3979 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3980 {
3981         struct inode *inode = file_inode(filp);
3982         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3983         struct f2fs_comp_option option;
3984         int ret = 0;
3985
3986         if (!f2fs_sb_has_compression(sbi))
3987                 return -EOPNOTSUPP;
3988
3989         if (!(filp->f_mode & FMODE_WRITE))
3990                 return -EBADF;
3991
3992         if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3993                                 sizeof(option)))
3994                 return -EFAULT;
3995
3996         if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3997                 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3998                 option.algorithm >= COMPRESS_MAX)
3999                 return -EINVAL;
4000
4001         file_start_write(filp);
4002         inode_lock(inode);
4003
4004         f2fs_down_write(&F2FS_I(inode)->i_sem);
4005         if (!f2fs_compressed_file(inode)) {
4006                 ret = -EINVAL;
4007                 goto out;
4008         }
4009
4010         if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4011                 ret = -EBUSY;
4012                 goto out;
4013         }
4014
4015         if (F2FS_HAS_BLOCKS(inode)) {
4016                 ret = -EFBIG;
4017                 goto out;
4018         }
4019
4020         F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4021         F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4022         F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
4023         /* Set default level */
4024         if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
4025                 F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
4026         else
4027                 F2FS_I(inode)->i_compress_level = 0;
4028         /* Adjust mount option level */
4029         if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
4030             F2FS_OPTION(sbi).compress_level)
4031                 F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
4032         f2fs_mark_inode_dirty_sync(inode, true);
4033
4034         if (!f2fs_is_compress_backend_ready(inode))
4035                 f2fs_warn(sbi, "compression algorithm is successfully set, "
4036                         "but current kernel doesn't support this algorithm.");
4037 out:
4038         f2fs_up_write(&F2FS_I(inode)->i_sem);
4039         inode_unlock(inode);
4040         file_end_write(filp);
4041
4042         return ret;
4043 }
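
/*
 * Usage sketch (userspace, not part of this file): switch a still-empty
 * compressed file to zstd with a 4-block cluster. The algorithm value
 * mirrors the kernel's internal enum (COMPRESS_ZSTD), which is not
 * exported to uapi, so the raw number is an assumption of this sketch.
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm        = 2,	/* COMPRESS_ZSTD in this tree */
 *		.log_cluster_size = 2,	/* 2^2 = 4 blocks per cluster */
 *	};
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */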
4044
4045 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4046 {
4047         DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
4048         struct address_space *mapping = inode->i_mapping;
4049         struct page *page;
4050         pgoff_t redirty_idx = page_idx;
4051         int i, page_len = 0, ret = 0;
4052
4053         page_cache_ra_unbounded(&ractl, len, 0);
4054
4055         for (i = 0; i < len; i++, page_idx++) {
4056                 page = read_cache_page(mapping, page_idx, NULL, NULL);
4057                 if (IS_ERR(page)) {
4058                         ret = PTR_ERR(page);
4059                         break;
4060                 }
4061                 page_len++;
4062         }
4063
4064         for (i = 0; i < page_len; i++, redirty_idx++) {
4065                 page = find_lock_page(mapping, redirty_idx);
4066
4067                 /* It never fails, because the page was pinned above */
4068                 f2fs_bug_on(F2FS_I_SB(inode), !page);
4069
4070                 set_page_dirty(page);
4071                 set_page_private_gcing(page);
4072                 f2fs_put_page(page, 1);
4073                 f2fs_put_page(page, 0);
4074         }
4075
4076         return ret;
4077 }
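
/*
 * The two ioctls below reuse the regular writeback path to change a file's
 * on-disk compression state: redirty_blocks() above pulls each cluster into
 * the page cache and dirties it, then writeback rewrites the data, either
 * decompressed (the default under COMPR_MODE_USER) or compressed (with
 * FI_ENABLE_COMPRESS set). Dirty pages are flushed every blk_per_seg pages
 * to bound memory usage.
 */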
4078
4079 static int f2fs_ioc_decompress_file(struct file *filp)
4080 {
4081         struct inode *inode = file_inode(filp);
4082         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4083         struct f2fs_inode_info *fi = F2FS_I(inode);
4084         pgoff_t page_idx = 0, last_idx;
4085         unsigned int blk_per_seg = sbi->blocks_per_seg;
4086         int cluster_size = fi->i_cluster_size;
4087         int count, ret;
4088
4089         if (!f2fs_sb_has_compression(sbi) ||
4090                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4091                 return -EOPNOTSUPP;
4092
4093         if (!(filp->f_mode & FMODE_WRITE))
4094                 return -EBADF;
4095
4096         if (!f2fs_compressed_file(inode))
4097                 return -EINVAL;
4098
4099         f2fs_balance_fs(sbi, true);
4100
4101         file_start_write(filp);
4102         inode_lock(inode);
4103
4104         if (!f2fs_is_compress_backend_ready(inode)) {
4105                 ret = -EOPNOTSUPP;
4106                 goto out;
4107         }
4108
4109         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4110                 ret = -EINVAL;
4111                 goto out;
4112         }
4113
4114         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4115         if (ret)
4116                 goto out;
4117
4118         if (!atomic_read(&fi->i_compr_blocks))
4119                 goto out;
4120
4121         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4122
4123         count = last_idx - page_idx;
4124         while (count && count >= cluster_size) {
4125                 ret = redirty_blocks(inode, page_idx, cluster_size);
4126                 if (ret < 0)
4127                         break;
4128
4129                 if (get_dirty_pages(inode) >= blk_per_seg) {
4130                         ret = filemap_fdatawrite(inode->i_mapping);
4131                         if (ret < 0)
4132                                 break;
4133                 }
4134
4135                 count -= cluster_size;
4136                 page_idx += cluster_size;
4137
4138                 cond_resched();
4139                 if (fatal_signal_pending(current)) {
4140                         ret = -EINTR;
4141                         break;
4142                 }
4143         }
4144
4145         if (!ret)
4146                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4147                                                         LLONG_MAX);
4148
4149         if (ret)
4150                 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4151                           __func__, ret);
4152 out:
4153         inode_unlock(inode);
4154         file_end_write(filp);
4155
4156         return ret;
4157 }
4158
4159 static int f2fs_ioc_compress_file(struct file *filp)
4160 {
4161         struct inode *inode = file_inode(filp);
4162         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4163         pgoff_t page_idx = 0, last_idx;
4164         unsigned int blk_per_seg = sbi->blocks_per_seg;
4165         int cluster_size = F2FS_I(inode)->i_cluster_size;
4166         int count, ret;
4167
4168         if (!f2fs_sb_has_compression(sbi) ||
4169                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4170                 return -EOPNOTSUPP;
4171
4172         if (!(filp->f_mode & FMODE_WRITE))
4173                 return -EBADF;
4174
4175         if (!f2fs_compressed_file(inode))
4176                 return -EINVAL;
4177
4178         f2fs_balance_fs(sbi, true);
4179
4180         file_start_write(filp);
4181         inode_lock(inode);
4182
4183         if (!f2fs_is_compress_backend_ready(inode)) {
4184                 ret = -EOPNOTSUPP;
4185                 goto out;
4186         }
4187
4188         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4189                 ret = -EINVAL;
4190                 goto out;
4191         }
4192
4193         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4194         if (ret)
4195                 goto out;
4196
4197         set_inode_flag(inode, FI_ENABLE_COMPRESS);
4198
4199         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4200
4201         count = last_idx - page_idx;
4202         while (count && count >= cluster_size) {
4203                 ret = redirty_blocks(inode, page_idx, cluster_size);
4204                 if (ret < 0)
4205                         break;
4206
4207                 if (get_dirty_pages(inode) >= blk_per_seg) {
4208                         ret = filemap_fdatawrite(inode->i_mapping);
4209                         if (ret < 0)
4210                                 break;
4211                 }
4212
4213                 count -= cluster_size;
4214                 page_idx += cluster_size;
4215
4216                 cond_resched();
4217                 if (fatal_signal_pending(current)) {
4218                         ret = -EINTR;
4219                         break;
4220                 }
4221         }
4222
4223         if (!ret)
4224                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4225                                                         LLONG_MAX);
4226
4227         clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4228
4229         if (ret)
4230                 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4231                           __func__, ret);
4232 out:
4233         inode_unlock(inode);
4234         file_end_write(filp);
4235
4236         return ret;
4237 }
4238
4239 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4240 {
4241         switch (cmd) {
4242         case FS_IOC_GETVERSION:
4243                 return f2fs_ioc_getversion(filp, arg);
4244         case F2FS_IOC_START_ATOMIC_WRITE:
4245                 return f2fs_ioc_start_atomic_write(filp, false);
4246         case F2FS_IOC_START_ATOMIC_REPLACE:
4247                 return f2fs_ioc_start_atomic_write(filp, true);
4248         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4249                 return f2fs_ioc_commit_atomic_write(filp);
4250         case F2FS_IOC_ABORT_ATOMIC_WRITE:
4251                 return f2fs_ioc_abort_atomic_write(filp);
4252         case F2FS_IOC_START_VOLATILE_WRITE:
4253         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4254                 return -EOPNOTSUPP;
4255         case F2FS_IOC_SHUTDOWN:
4256                 return f2fs_ioc_shutdown(filp, arg);
4257         case FITRIM:
4258                 return f2fs_ioc_fitrim(filp, arg);
4259         case FS_IOC_SET_ENCRYPTION_POLICY:
4260                 return f2fs_ioc_set_encryption_policy(filp, arg);
4261         case FS_IOC_GET_ENCRYPTION_POLICY:
4262                 return f2fs_ioc_get_encryption_policy(filp, arg);
4263         case FS_IOC_GET_ENCRYPTION_PWSALT:
4264                 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4265         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4266                 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4267         case FS_IOC_ADD_ENCRYPTION_KEY:
4268                 return f2fs_ioc_add_encryption_key(filp, arg);
4269         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4270                 return f2fs_ioc_remove_encryption_key(filp, arg);
4271         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4272                 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4273         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4274                 return f2fs_ioc_get_encryption_key_status(filp, arg);
4275         case FS_IOC_GET_ENCRYPTION_NONCE:
4276                 return f2fs_ioc_get_encryption_nonce(filp, arg);
4277         case F2FS_IOC_GARBAGE_COLLECT:
4278                 return f2fs_ioc_gc(filp, arg);
4279         case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4280                 return f2fs_ioc_gc_range(filp, arg);
4281         case F2FS_IOC_WRITE_CHECKPOINT:
4282                 return f2fs_ioc_write_checkpoint(filp);
4283         case F2FS_IOC_DEFRAGMENT:
4284                 return f2fs_ioc_defragment(filp, arg);
4285         case F2FS_IOC_MOVE_RANGE:
4286                 return f2fs_ioc_move_range(filp, arg);
4287         case F2FS_IOC_FLUSH_DEVICE:
4288                 return f2fs_ioc_flush_device(filp, arg);
4289         case F2FS_IOC_GET_FEATURES:
4290                 return f2fs_ioc_get_features(filp, arg);
4291         case F2FS_IOC_GET_PIN_FILE:
4292                 return f2fs_ioc_get_pin_file(filp, arg);
4293         case F2FS_IOC_SET_PIN_FILE:
4294                 return f2fs_ioc_set_pin_file(filp, arg);
4295         case F2FS_IOC_PRECACHE_EXTENTS:
4296                 return f2fs_ioc_precache_extents(filp);
4297         case F2FS_IOC_RESIZE_FS:
4298                 return f2fs_ioc_resize_fs(filp, arg);
4299         case FS_IOC_ENABLE_VERITY:
4300                 return f2fs_ioc_enable_verity(filp, arg);
4301         case FS_IOC_MEASURE_VERITY:
4302                 return f2fs_ioc_measure_verity(filp, arg);
4303         case FS_IOC_READ_VERITY_METADATA:
4304                 return f2fs_ioc_read_verity_metadata(filp, arg);
4305         case FS_IOC_GETFSLABEL:
4306                 return f2fs_ioc_getfslabel(filp, arg);
4307         case FS_IOC_SETFSLABEL:
4308                 return f2fs_ioc_setfslabel(filp, arg);
4309         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4310                 return f2fs_ioc_get_compress_blocks(filp, arg);
4311         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4312                 return f2fs_release_compress_blocks(filp, arg);
4313         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4314                 return f2fs_reserve_compress_blocks(filp, arg);
4315         case F2FS_IOC_SEC_TRIM_FILE:
4316                 return f2fs_sec_trim_file(filp, arg);
4317         case F2FS_IOC_GET_COMPRESS_OPTION:
4318                 return f2fs_ioc_get_compress_option(filp, arg);
4319         case F2FS_IOC_SET_COMPRESS_OPTION:
4320                 return f2fs_ioc_set_compress_option(filp, arg);
4321         case F2FS_IOC_DECOMPRESS_FILE:
4322                 return f2fs_ioc_decompress_file(filp);
4323         case F2FS_IOC_COMPRESS_FILE:
4324                 return f2fs_ioc_compress_file(filp);
4325         default:
4326                 return -ENOTTY;
4327         }
4328 }
4329
4330 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4331 {
4332         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4333                 return -EIO;
4334         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4335                 return -ENOSPC;
4336
4337         return __f2fs_ioctl(filp, cmd, arg);
4338 }
4339
4340 /*
4341  * Return %true if the given read or write request should use direct I/O, or
4342  * %false if it should use buffered I/O.
4343  */
4344 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4345                                 struct iov_iter *iter)
4346 {
4347         unsigned int align;
4348
4349         if (!(iocb->ki_flags & IOCB_DIRECT))
4350                 return false;
4351
4352         if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4353                 return false;
4354
4355         /*
4356          * Direct I/O not aligned to the disk's logical_block_size will be
4357          * attempted, but will fail with -EINVAL.
4358          *
4359          * f2fs additionally requires that direct I/O be aligned to the
4360          * filesystem block size, which is often a stricter requirement.
4361          * However, f2fs traditionally falls back to buffered I/O on requests
4362          * that are logical_block_size-aligned but not fs-block aligned.
4363          *
4364          * The below logic implements this behavior.
4365          */
4366         align = iocb->ki_pos | iov_iter_alignment(iter);
4367         if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4368             IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4369                 return false;
4370
4371         return true;
4372 }
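
/*
 * Illustration (assuming a 4 KiB fs block on a 512-byte logical-block
 * device): a direct write at pos 4096 stays direct; one at pos 512 is
 * logical-block aligned but not fs-block aligned, so it falls back to
 * buffered I/O; one at pos 100 satisfies neither alignment and is left to
 * fail with -EINVAL, as described above.
 */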
4373
4374 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4375                                 unsigned int flags)
4376 {
4377         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4378
4379         dec_page_count(sbi, F2FS_DIO_READ);
4380         if (error)
4381                 return error;
4382         f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4383         return 0;
4384 }
4385
4386 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4387         .end_io = f2fs_dio_read_end_io,
4388 };
4389
4390 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4391 {
4392         struct file *file = iocb->ki_filp;
4393         struct inode *inode = file_inode(file);
4394         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4395         struct f2fs_inode_info *fi = F2FS_I(inode);
4396         const loff_t pos = iocb->ki_pos;
4397         const size_t count = iov_iter_count(to);
4398         struct iomap_dio *dio;
4399         ssize_t ret;
4400
4401         if (count == 0)
4402                 return 0; /* skip atime update */
4403
4404         trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4405
4406         if (iocb->ki_flags & IOCB_NOWAIT) {
4407                 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4408                         ret = -EAGAIN;
4409                         goto out;
4410                 }
4411         } else {
4412                 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4413         }
4414
4415         /*
4416          * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4417          * the higher-level function iomap_dio_rw() in order to ensure that the
4418          * F2FS_DIO_READ counter will be decremented correctly in all cases.
4419          */
4420         inc_page_count(sbi, F2FS_DIO_READ);
4421         dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4422                              &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4423         if (IS_ERR_OR_NULL(dio)) {
4424                 ret = PTR_ERR_OR_ZERO(dio);
4425                 if (ret != -EIOCBQUEUED)
4426                         dec_page_count(sbi, F2FS_DIO_READ);
4427         } else {
4428                 ret = iomap_dio_complete(dio);
4429         }
4430
4431         f2fs_up_read(&fi->i_gc_rwsem[READ]);
4432
4433         file_accessed(file);
4434 out:
4435         trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4436         return ret;
4437 }
4438
4439 static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
4440                                     int rw)
4441 {
4442         struct inode *inode = file_inode(file);
4443         char *buf, *path;
4444
4445         buf = f2fs_getname(F2FS_I_SB(inode));
4446         if (!buf)
4447                 return;
4448         path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
4449         if (IS_ERR(path))
4450                 goto free_buf;
4451         if (rw == WRITE)
4452                 trace_f2fs_datawrite_start(inode, pos, count,
4453                                 current->pid, path, current->comm);
4454         else
4455                 trace_f2fs_dataread_start(inode, pos, count,
4456                                 current->pid, path, current->comm);
4457 free_buf:
4458         f2fs_putname(buf);
4459 }
4460
4461 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4462 {
4463         struct inode *inode = file_inode(iocb->ki_filp);
4464         const loff_t pos = iocb->ki_pos;
4465         ssize_t ret;
4466
4467         if (!f2fs_is_compress_backend_ready(inode))
4468                 return -EOPNOTSUPP;
4469
4470         if (trace_f2fs_dataread_start_enabled())
4471                 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4472                                         iov_iter_count(to), READ);
4473
4474         if (f2fs_should_use_dio(inode, iocb, to)) {
4475                 ret = f2fs_dio_read_iter(iocb, to);
4476         } else {
4477                 ret = filemap_read(iocb, to, 0);
4478                 if (ret > 0)
4479                         f2fs_update_iostat(F2FS_I_SB(inode), inode,
4480                                                 APP_BUFFERED_READ_IO, ret);
4481         }
4482         if (trace_f2fs_dataread_end_enabled())
4483                 trace_f2fs_dataread_end(inode, pos, ret);
4484         return ret;
4485 }
4486
4487 static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
4488                                      struct pipe_inode_info *pipe,
4489                                      size_t len, unsigned int flags)
4490 {
4491         struct inode *inode = file_inode(in);
4492         const loff_t pos = *ppos;
4493         ssize_t ret;
4494
4495         if (!f2fs_is_compress_backend_ready(inode))
4496                 return -EOPNOTSUPP;
4497
4498         if (trace_f2fs_dataread_start_enabled())
4499                 f2fs_trace_rw_file_path(in, pos, len, READ);
4500
4501         ret = filemap_splice_read(in, ppos, pipe, len, flags);
4502         if (ret > 0)
4503                 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4504                                    APP_BUFFERED_READ_IO, ret);
4505
4506         if (trace_f2fs_dataread_end_enabled())
4507                 trace_f2fs_dataread_end(inode, pos, ret);
4508         return ret;
4509 }
4510
4511 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4512 {
4513         struct file *file = iocb->ki_filp;
4514         struct inode *inode = file_inode(file);
4515         ssize_t count;
4516         int err;
4517
4518         if (IS_IMMUTABLE(inode))
4519                 return -EPERM;
4520
4521         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4522                 return -EPERM;
4523
4524         count = generic_write_checks(iocb, from);
4525         if (count <= 0)
4526                 return count;
4527
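	/* file_modified() strips setuid/setgid bits and updates mtime/ctime. */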
4528         err = file_modified(file);
4529         if (err)
4530                 return err;
4531         return count;
4532 }
4533
4534 /*
4535  * Preallocate blocks for a write request, if it is possible and helpful to do
4536  * so.  Returns a positive number if blocks may have been preallocated, 0 if no
4537  * blocks were preallocated, or a negative errno value if something went
4538  * seriously wrong.  Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4539  * requested blocks (not just some of them) have been allocated.
4540  */
4541 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4542                                    bool dio)
4543 {
4544         struct inode *inode = file_inode(iocb->ki_filp);
4545         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4546         const loff_t pos = iocb->ki_pos;
4547         const size_t count = iov_iter_count(iter);
4548         struct f2fs_map_blocks map = {};
4549         int flag;
4550         int ret;
4551
4552         /* If it will be an out-of-place direct write, don't bother. */
4553         if (dio && f2fs_lfs_mode(sbi))
4554                 return 0;
4555         /*
4556          * Don't preallocate for a direct write within i_size: a DIO that
4557          * hits a hole falls back to buffered IO anyway (DIO_SKIP_HOLES).
4558          */
4559         if (dio && i_size_read(inode) &&
4560                 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4561                 return 0;
4562
4563         /* No-wait I/O can't allocate blocks. */
4564         if (iocb->ki_flags & IOCB_NOWAIT)
4565                 return 0;
4566
4567         /* If it will be a short write, don't bother. */
4568         if (fault_in_iov_iter_readable(iter, count))
4569                 return 0;
4570
4571         if (f2fs_has_inline_data(inode)) {
4572                 /* If the data will fit inline, don't bother. */
4573                 if (pos + count <= MAX_INLINE_DATA(inode))
4574                         return 0;
4575                 ret = f2fs_convert_inline_inode(inode);
4576                 if (ret)
4577                         return ret;
4578         }
4579
4580         /* Don't preallocate blocks that would only be partially written. */
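	/*
	 * Worked example with 4KB blocks: pos = 1000, count = 10000 covers
	 * bytes [1000, 11000).  F2FS_BLK_ALIGN(1000) = 1 and
	 * F2FS_BYTES_TO_BLK(11000) = 2, so m_len = 2 - 1 = 1: only block 1
	 * (bytes 4096..8191), the sole fully-overwritten block, is mapped.
	 */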
4581         map.m_lblk = F2FS_BLK_ALIGN(pos);
4582         map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4583         if (map.m_len > map.m_lblk)
4584                 map.m_len -= map.m_lblk;
4585         else
4586                 return 0;
4587
4588         map.m_may_create = true;
4589         if (dio) {
4590                 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4591                 flag = F2FS_GET_BLOCK_PRE_DIO;
4592         } else {
4593                 map.m_seg_type = NO_CHECK_TYPE;
4594                 flag = F2FS_GET_BLOCK_PRE_AIO;
4595         }
4596
4597         ret = f2fs_map_blocks(inode, &map, flag);
4598         /* On -ENOSPC/-EDQUOT, it is fine to report the blocks already allocated. */
4599         if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4600                 return ret;
4601         if (ret == 0)
4602                 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4603         return map.m_len;
4604 }
4605
4606 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4607                                         struct iov_iter *from)
4608 {
4609         struct file *file = iocb->ki_filp;
4610         struct inode *inode = file_inode(file);
4611         ssize_t ret;
4612
4613         if (iocb->ki_flags & IOCB_NOWAIT)
4614                 return -EOPNOTSUPP;
4615
4616         ret = generic_perform_write(iocb, from);
4617
4618         if (ret > 0) {
4619                 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4620                                                 APP_BUFFERED_IO, ret);
4621         }
4622         return ret;
4623 }
4624
4625 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4626                                  unsigned int flags)
4627 {
4628         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4629
4630         dec_page_count(sbi, F2FS_DIO_WRITE);
4631         if (error)
4632                 return error;
4633         f2fs_update_time(sbi, REQ_TIME);
4634         f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
4635         return 0;
4636 }
4637
4638 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4639         .end_io = f2fs_dio_write_end_io,
4640 };
4641
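/*
 * Write back the given range and then drop it from the page cache so a
 * following O_DIRECT access sees the on-disk data.  The invalidation is
 * best-effort: invalidate_mapping_pages() skips pages it cannot release.
 */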
4642 static void f2fs_flush_buffered_write(struct address_space *mapping,
4643                                       loff_t start_pos, loff_t end_pos)
4644 {
4645         int ret;
4646
4647         ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
4648         if (ret < 0)
4649                 return;
4650         invalidate_mapping_pages(mapping,
4651                                  start_pos >> PAGE_SHIFT,
4652                                  end_pos >> PAGE_SHIFT);
4653 }
4654
4655 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4656                                    bool *may_need_sync)
4657 {
4658         struct file *file = iocb->ki_filp;
4659         struct inode *inode = file_inode(file);
4660         struct f2fs_inode_info *fi = F2FS_I(inode);
4661         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4662         const bool do_opu = f2fs_lfs_mode(sbi);
4663         const loff_t pos = iocb->ki_pos;
4664         const ssize_t count = iov_iter_count(from);
4665         unsigned int dio_flags;
4666         struct iomap_dio *dio;
4667         ssize_t ret;
4668
4669         trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4670
4671         if (iocb->ki_flags & IOCB_NOWAIT) {
4672                 /* f2fs_convert_inline_inode() and block allocation can block */
4673                 if (f2fs_has_inline_data(inode) ||
4674                     !f2fs_overwrite_io(inode, pos, count)) {
4675                         ret = -EAGAIN;
4676                         goto out;
4677                 }
4678
4679                 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4680                         ret = -EAGAIN;
4681                         goto out;
4682                 }
4683                 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4684                         f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4685                         ret = -EAGAIN;
4686                         goto out;
4687                 }
4688         } else {
4689                 ret = f2fs_convert_inline_inode(inode);
4690                 if (ret)
4691                         goto out;
4692
4693                 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4694                 if (do_opu)
4695                         f2fs_down_read(&fi->i_gc_rwsem[READ]);
4696         }
4697
4698         /*
4699          * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4700          * the higher-level function iomap_dio_rw() in order to ensure that the
4701          * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4702          */
4703         inc_page_count(sbi, F2FS_DIO_WRITE);
4704         dio_flags = 0;
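	/*
	 * A size-extending DIO must finish before i_size is updated below,
	 * so force synchronous completion even for async iocbs.
	 */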
4705         if (pos + count > inode->i_size)
4706                 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4707         dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4708                              &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4709         if (IS_ERR_OR_NULL(dio)) {
4710                 ret = PTR_ERR_OR_ZERO(dio);
4711                 if (ret == -ENOTBLK)
4712                         ret = 0;
4713                 if (ret != -EIOCBQUEUED)
4714                         dec_page_count(sbi, F2FS_DIO_WRITE);
4715         } else {
4716                 ret = iomap_dio_complete(dio);
4717         }
4718
4719         if (do_opu)
4720                 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4721         f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4722
4723         if (ret < 0)
4724                 goto out;
4725         if (pos + ret > inode->i_size)
4726                 f2fs_i_size_write(inode, pos + ret);
4727         if (!do_opu)
4728                 set_inode_flag(inode, FI_UPDATE_WRITE);
4729
4730         if (iov_iter_count(from)) {
4731                 ssize_t ret2;
4732                 loff_t bufio_start_pos = iocb->ki_pos;
4733
4734                 /*
4735                  * The direct write was partial, so we need to fall back to a
4736                  * buffered write for the remainder.
4737                  */
4738
4739                 ret2 = f2fs_buffered_write_iter(iocb, from);
4740                 if (iov_iter_count(from))
4741                         f2fs_write_failed(inode, iocb->ki_pos);
4742                 if (ret2 < 0)
4743                         goto out;
4744
4745                 /*
4746                  * Ensure that the pagecache pages are written to disk and
4747                  * invalidated to preserve the expected O_DIRECT semantics.
4748                  */
4749                 if (ret2 > 0) {
4750                         loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4751
4752                         ret += ret2;
4753
4754                         f2fs_flush_buffered_write(file->f_mapping,
4755                                                   bufio_start_pos,
4756                                                   bufio_end_pos);
4757                 }
4758         } else {
4759                 /* iomap_dio_complete() already handled the generic_write_sync(). */
4760                 *may_need_sync = false;
4761         }
4762 out:
4763         trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4764         return ret;
4765 }
4766
4767 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4768 {
4769         struct inode *inode = file_inode(iocb->ki_filp);
4770         const loff_t orig_pos = iocb->ki_pos;
4771         const size_t orig_count = iov_iter_count(from);
4772         loff_t target_size;
4773         bool dio;
4774         bool may_need_sync = true;
4775         int preallocated;
4776         ssize_t ret;
4777
4778         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4779                 ret = -EIO;
4780                 goto out;
4781         }
4782
4783         if (!f2fs_is_compress_backend_ready(inode)) {
4784                 ret = -EOPNOTSUPP;
4785                 goto out;
4786         }
4787
4788         if (iocb->ki_flags & IOCB_NOWAIT) {
4789                 if (!inode_trylock(inode)) {
4790                         ret = -EAGAIN;
4791                         goto out;
4792                 }
4793         } else {
4794                 inode_lock(inode);
4795         }
4796
4797         ret = f2fs_write_checks(iocb, from);
4798         if (ret <= 0)
4799                 goto out_unlock;
4800
4801         /* Determine whether we will do a direct write or a buffered write. */
4802         dio = f2fs_should_use_dio(inode, iocb, from);
4803
4804         /* Possibly preallocate the blocks for the write. */
4805         target_size = iocb->ki_pos + iov_iter_count(from);
4806         preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4807         if (preallocated < 0) {
4808                 ret = preallocated;
4809         } else {
4810                 if (trace_f2fs_datawrite_start_enabled())
4811                         f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4812                                                 orig_count, WRITE);
4813
4814                 /* Do the actual write. */
4815                 ret = dio ?
4816                         f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4817                         f2fs_buffered_write_iter(iocb, from);
4818
4819                 if (trace_f2fs_datawrite_end_enabled())
4820                         trace_f2fs_datawrite_end(inode, orig_pos, ret);
4821         }
4822
4823         /* Don't leave any preallocated blocks around past i_size. */
4824         if (preallocated && i_size_read(inode) < target_size) {
4825                 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4826                 filemap_invalidate_lock(inode->i_mapping);
4827                 if (!f2fs_truncate(inode))
4828                         file_dont_truncate(inode);
4829                 filemap_invalidate_unlock(inode->i_mapping);
4830                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4831         } else {
4832                 file_dont_truncate(inode);
4833         }
4834
4835         clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4836 out_unlock:
4837         inode_unlock(inode);
4838 out:
4839         trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4840
4841         if (ret > 0 && may_need_sync)
4842                 ret = generic_write_sync(iocb, ret);
4843
4844         /* If buffered IO was forced, flush and drop the data from
4845          * the page cache to preserve O_DIRECT semantics.
4846          */
4847         if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
4848                 f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
4849                                           orig_pos,
4850                                           orig_pos + ret - 1);
4851
4852         return ret;
4853 }
4854
4855 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4856                 int advice)
4857 {
4858         struct address_space *mapping;
4859         struct backing_dev_info *bdi;
4860         struct inode *inode = file_inode(filp);
4861         int err;
4862
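	/*
	 * Sequential hint: widen this file's readahead window by the
	 * per-filesystem seq_file_ra_mul tunable and clear FMODE_RANDOM
	 * so the normal readahead heuristics apply.
	 */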
4863         if (advice == POSIX_FADV_SEQUENTIAL) {
4864                 if (S_ISFIFO(inode->i_mode))
4865                         return -ESPIPE;
4866
4867                 mapping = filp->f_mapping;
4868                 if (!mapping || len < 0)
4869                         return -EINVAL;
4870
4871                 bdi = inode_to_bdi(mapping->host);
4872                 filp->f_ra.ra_pages = bdi->ra_pages *
4873                         F2FS_I_SB(inode)->seq_file_ra_mul;
4874                 spin_lock(&filp->f_lock);
4875                 filp->f_mode &= ~FMODE_RANDOM;
4876                 spin_unlock(&filp->f_lock);
4877                 return 0;
4878         } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
4879                 /* Load extent cache at the first readahead. */
4880                 f2fs_precache_extents(inode);
4881         }
4882
4883         err = generic_fadvise(filp, offset, len, advice);
4884         if (!err && advice == POSIX_FADV_DONTNEED &&
4885                 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4886                 f2fs_compressed_file(inode))
4887                 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4888
4889         return err;
4890 }
4891
4892 #ifdef CONFIG_COMPAT
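/*
 * On 32-bit ABIs a u64 field may be only 4-byte aligned (e.g. x86), so
 * these structs can differ in size and layout from their native
 * counterparts, which changes the computed ioctl numbers; the compat
 * structs below mirror the 32-bit ABI so both variants are serviced.
 */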
4893 struct compat_f2fs_gc_range {
4894         u32 sync;
4895         compat_u64 start;
4896         compat_u64 len;
4897 };
4898 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4899                                                 struct compat_f2fs_gc_range)
4900
4901 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4902 {
4903         struct compat_f2fs_gc_range __user *urange;
4904         struct f2fs_gc_range range;
4905         int err;
4906
4907         urange = compat_ptr(arg);
4908         err = get_user(range.sync, &urange->sync);
4909         err |= get_user(range.start, &urange->start);
4910         err |= get_user(range.len, &urange->len);
4911         if (err)
4912                 return -EFAULT;
4913
4914         return __f2fs_ioc_gc_range(file, &range);
4915 }
4916
4917 struct compat_f2fs_move_range {
4918         u32 dst_fd;
4919         compat_u64 pos_in;
4920         compat_u64 pos_out;
4921         compat_u64 len;
4922 };
4923 #define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4924                                         struct compat_f2fs_move_range)
4925
4926 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4927 {
4928         struct compat_f2fs_move_range __user *urange;
4929         struct f2fs_move_range range;
4930         int err;
4931
4932         urange = compat_ptr(arg);
4933         err = get_user(range.dst_fd, &urange->dst_fd);
4934         err |= get_user(range.pos_in, &urange->pos_in);
4935         err |= get_user(range.pos_out, &urange->pos_out);
4936         err |= get_user(range.len, &urange->len);
4937         if (err)
4938                 return -EFAULT;
4939
4940         return __f2fs_ioc_move_range(file, &range);
4941 }
4942
4943 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4944 {
4945         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4946                 return -EIO;
4947         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4948                 return -ENOSPC;
4949
4950         switch (cmd) {
4951         case FS_IOC32_GETVERSION:
4952                 cmd = FS_IOC_GETVERSION;
4953                 break;
4954         case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4955                 return f2fs_compat_ioc_gc_range(file, arg);
4956         case F2FS_IOC32_MOVE_RANGE:
4957                 return f2fs_compat_ioc_move_range(file, arg);
4958         case F2FS_IOC_START_ATOMIC_WRITE:
4959         case F2FS_IOC_START_ATOMIC_REPLACE:
4960         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4961         case F2FS_IOC_START_VOLATILE_WRITE:
4962         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4963         case F2FS_IOC_ABORT_ATOMIC_WRITE:
4964         case F2FS_IOC_SHUTDOWN:
4965         case FITRIM:
4966         case FS_IOC_SET_ENCRYPTION_POLICY:
4967         case FS_IOC_GET_ENCRYPTION_PWSALT:
4968         case FS_IOC_GET_ENCRYPTION_POLICY:
4969         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4970         case FS_IOC_ADD_ENCRYPTION_KEY:
4971         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4972         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4973         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4974         case FS_IOC_GET_ENCRYPTION_NONCE:
4975         case F2FS_IOC_GARBAGE_COLLECT:
4976         case F2FS_IOC_WRITE_CHECKPOINT:
4977         case F2FS_IOC_DEFRAGMENT:
4978         case F2FS_IOC_FLUSH_DEVICE:
4979         case F2FS_IOC_GET_FEATURES:
4980         case F2FS_IOC_GET_PIN_FILE:
4981         case F2FS_IOC_SET_PIN_FILE:
4982         case F2FS_IOC_PRECACHE_EXTENTS:
4983         case F2FS_IOC_RESIZE_FS:
4984         case FS_IOC_ENABLE_VERITY:
4985         case FS_IOC_MEASURE_VERITY:
4986         case FS_IOC_READ_VERITY_METADATA:
4987         case FS_IOC_GETFSLABEL:
4988         case FS_IOC_SETFSLABEL:
4989         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4990         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4991         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4992         case F2FS_IOC_SEC_TRIM_FILE:
4993         case F2FS_IOC_GET_COMPRESS_OPTION:
4994         case F2FS_IOC_SET_COMPRESS_OPTION:
4995         case F2FS_IOC_DECOMPRESS_FILE:
4996         case F2FS_IOC_COMPRESS_FILE:
4997                 break;
4998         default:
4999                 return -ENOIOCTLCMD;
5000         }
5001         return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5002 }
5003 #endif
5004
5005 const struct file_operations f2fs_file_operations = {
5006         .llseek         = f2fs_llseek,
5007         .read_iter      = f2fs_file_read_iter,
5008         .write_iter     = f2fs_file_write_iter,
5009         .iopoll         = iocb_bio_iopoll,
5010         .open           = f2fs_file_open,
5011         .release        = f2fs_release_file,
5012         .mmap           = f2fs_file_mmap,
5013         .flush          = f2fs_file_flush,
5014         .fsync          = f2fs_sync_file,
5015         .fallocate      = f2fs_fallocate,
5016         .unlocked_ioctl = f2fs_ioctl,
5017 #ifdef CONFIG_COMPAT
5018         .compat_ioctl   = f2fs_compat_ioctl,
5019 #endif
5020         .splice_read    = f2fs_file_splice_read,
5021         .splice_write   = iter_file_splice_write,
5022         .fadvise        = f2fs_file_fadvise,
5023 };