// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE      NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
        if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
                                        0, BIOSET_NEED_BVECS))
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_bioset(void)
{
        bioset_exit(&f2fs_bioset);
}

static bool __is_cp_guaranteed(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        struct f2fs_sb_info *sbi;

        if (!mapping)
                return false;

        inode = mapping->host;
        sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode))
                return true;

        if (f2fs_is_compressed_page(page))
                return false;
        if ((S_ISREG(inode->i_mode) &&
                        (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
                        page_private_gcing(page))
                return true;
        return false;
}

static enum count_type __read_io_type(struct page *page)
{
        struct address_space *mapping = page_file_mapping(page);

        if (mapping) {
                struct inode *inode = mapping->host;
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                if (inode->i_ino == F2FS_META_INO(sbi))
                        return F2FS_RD_META;

                if (inode->i_ino == F2FS_NODE_INO(sbi))
                        return F2FS_RD_NODE;
        }
        return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
        STEP_DECRYPT    = 1 << 0,
#else
        STEP_DECRYPT    = 0,    /* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
        STEP_DECOMPRESS = 1 << 1,
#else
        STEP_DECOMPRESS = 0,    /* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
        STEP_VERITY     = 1 << 2,
#else
        STEP_VERITY     = 0,    /* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct f2fs_sb_info *sbi;
        struct work_struct work;
        unsigned int enabled_steps;
        block_t fs_blkaddr;
};

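/*
 * A ctx's enabled_steps is a bitwise OR of the steps above.  For
 * example (sketch), a read from an encrypted, verity-protected file
 * would set up:
 *
 *      ctx->enabled_steps = STEP_DECRYPT | STEP_VERITY;
 *
 * Since a compiled-out step is defined as 0, tests such as
 * (ctx->enabled_steps & STEP_DECRYPT) are optimized away entirely when
 * the corresponding feature is disabled.
 */
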
static void f2fs_finish_read_bio(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        /*
         * Update and unlock the bio's pagecache pages, and put the
         * decompression context for any compressed pages.
         */
        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;

                if (f2fs_is_compressed_page(page)) {
                        if (bio->bi_status)
                                f2fs_end_read_compressed_page(page, true, 0);
                        f2fs_put_page_dic(page);
                        continue;
                }

                /* PG_error was set if decryption or verity failed. */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                dec_page_count(F2FS_P_SB(page), __read_io_type(page));
                unlock_page(page);
        }

        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;
        bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

        /*
         * fsverity_verify_bio() may call readpages() again, and while verity
         * will be disabled for this, decryption and/or decompression may still
         * be needed, resulting in another bio_post_read_ctx being allocated.
         * So to prevent deadlocks we need to release the current ctx to the
         * mempool first.  This assumes that verity is the last post-read step.
         */
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        /*
         * Verify the bio's pages with fs-verity.  Exclude compressed pages,
         * as those were handled separately by f2fs_end_read_compressed_page().
         */
        if (may_have_compressed_pages) {
                struct bio_vec *bv;
                struct bvec_iter_all iter_all;

                bio_for_each_segment_all(bv, bio, iter_all) {
                        struct page *page = bv->bv_page;

                        if (!f2fs_is_compressed_page(page) &&
                            !PageError(page) && !fsverity_verify_page(page))
                                SetPageError(page);
                }
        } else {
                fsverity_verify_bio(bio);
        }

        f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
        struct bio_post_read_ctx *ctx = bio->bi_private;

        if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
                INIT_WORK(&ctx->work, f2fs_verify_bio);
                fsverity_enqueue_verify_work(&ctx->work);
        } else {
                f2fs_finish_read_bio(bio);
        }
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;
        bool all_compressed = true;
        block_t blkaddr = ctx->fs_blkaddr;

        bio_for_each_segment_all(bv, ctx->bio, iter_all) {
                struct page *page = bv->bv_page;

                /* PG_error was set if decryption failed. */
                if (f2fs_is_compressed_page(page))
                        f2fs_end_read_compressed_page(page, PageError(page),
                                                blkaddr);
                else
                        all_compressed = false;

                blkaddr++;
        }

        /*
         * Optimization: if all the bio's pages are compressed, then scheduling
         * the per-bio verity work is unnecessary, as verity will be fully
         * handled at the compression cluster level.
         */
        if (all_compressed)
                ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        if (ctx->enabled_steps & STEP_DECRYPT)
                fscrypt_decrypt_bio(ctx->bio);

        if (ctx->enabled_steps & STEP_DECOMPRESS)
                f2fs_handle_step_decompress(ctx);

        f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
        struct bio_post_read_ctx *ctx;

        iostat_update_and_unbind_ctx(bio, 0);
        ctx = bio->bi_private;

        if (time_to_inject(sbi, FAULT_READ_IO)) {
                f2fs_show_injection_info(sbi, FAULT_READ_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        if (bio->bi_status) {
                f2fs_finish_read_bio(bio);
                return;
        }

        if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
                INIT_WORK(&ctx->work, f2fs_post_read_work);
                queue_work(ctx->sbi->post_read_wq, &ctx->work);
        } else {
                f2fs_verify_and_finish_bio(bio);
        }
}

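/*
 * Summary of the read completion pipeline built from the helpers above
 * (steps that are not enabled for a bio are skipped):
 *
 *      f2fs_read_end_io()
 *        -> f2fs_post_read_work()              decrypt/decompress on
 *                                              sbi->post_read_wq
 *        -> f2fs_verify_and_finish_bio()
 *        -> f2fs_verify_bio()                  fs-verity on its own workqueue
 *        -> f2fs_finish_read_bio()             unlock pages, free ctx, put bio
 */
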
static void f2fs_write_end_io(struct bio *bio)
{
        struct f2fs_sb_info *sbi;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        iostat_update_and_unbind_ctx(bio, 1);
        sbi = bio->bi_private;

        if (time_to_inject(sbi, FAULT_WRITE_IO)) {
                f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
                bio->bi_status = BLK_STS_IOERR;
        }

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                enum count_type type = WB_DATA_TYPE(page);

                if (page_private_dummy(page)) {
                        clear_page_private_dummy(page);
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);

                        if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }

                fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
                if (f2fs_is_compressed_page(page)) {
                        f2fs_compress_write_end_io(bio, page);
                        continue;
                }
#endif

                if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        if (type == F2FS_WB_CP_DATA)
                                f2fs_stop_checkpoint(sbi, true);
                }

                f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
                                        page->index != nid_of_node(page));

                dec_page_count(sbi, type);
                if (f2fs_in_warm_node_list(sbi, page))
                        f2fs_del_fsync_node_entry(sbi, page);
                clear_page_private_gcing(page);
                end_page_writeback(page);
        }
        if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
                                wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        int i;

        if (f2fs_is_multi_device(sbi)) {
                for (i = 0; i < sbi->s_ndevs; i++) {
                        if (FDEV(i).start_blk <= blk_addr &&
                            FDEV(i).end_blk >= blk_addr) {
                                blk_addr -= FDEV(i).start_blk;
                                bdev = FDEV(i).bdev;
                                break;
                        }
                }
        }
        if (bio) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        }
        return bdev;
}

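/*
 * Worked example (hypothetical device layout): if FDEV(1) covers
 * start_blk 0x10000 through end_blk 0x1ffff, then blk_addr 0x10008
 * selects FDEV(1).bdev and is rebased to device-relative block 0x8,
 * i.e. bi_sector = SECTOR_FROM_BLOCK(0x8).  On a single-device
 * filesystem the address is used as-is on sb->s_bdev.
 */
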
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return 0;

        for (i = 0; i < sbi->s_ndevs; i++)
                if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
                        return i;
        return 0;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

        f2fs_target_device(sbi, fio->new_blkaddr, bio);
        if (is_read_io(fio->op)) {
                bio->bi_end_io = f2fs_read_end_io;
                bio->bi_private = NULL;
        } else {
                bio->bi_end_io = f2fs_write_end_io;
                bio->bi_private = sbi;
                bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
                                                fio->type, fio->temp);
        }
        iostat_alloc_and_bind_ctx(sbi, bio, NULL);

        if (fio->io_wbc)
                wbc_init_bio(fio->io_wbc, bio);

        return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                                  pgoff_t first_idx,
                                  const struct f2fs_io_info *fio,
                                  gfp_t gfp_mask)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (!fio || !fio->encrypted_page)
                fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                                     pgoff_t next_idx,
                                     const struct f2fs_io_info *fio)
{
        /*
         * The f2fs garbage collector sets ->encrypted_page when it wants to
         * read/write raw data without encryption.
         */
        if (fio && fio->encrypted_page)
                return !bio_has_crypt_ctx(bio);

        return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        if (!is_read_io(bio_op(bio))) {
                unsigned int start;

                if (type != DATA && type != NODE)
                        goto submit_io;

                if (f2fs_lfs_mode(sbi) && current->plug)
                        blk_finish_plug(current->plug);

                if (!F2FS_IO_ALIGNED(sbi))
                        goto submit_io;

                start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
                start %= F2FS_IO_SIZE(sbi);

                if (start == 0)
                        goto submit_io;

                /* fill dummy pages */
                for (; start < F2FS_IO_SIZE(sbi); start++) {
                        struct page *page =
                                mempool_alloc(sbi->write_io_dummy,
                                              GFP_NOIO | __GFP_NOFAIL);
                        f2fs_bug_on(sbi, !page);

                        lock_page(page);

                        zero_user_segment(page, 0, PAGE_SIZE);
                        set_page_private_dummy(page);

                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                f2fs_bug_on(sbi, 1);
                }
                /*
                 * In the NODE case, we lose the next block address chain.
                 * So, we need to do a checkpoint in f2fs_sync_file.
                 */
                if (type == NODE)
                        set_sbi_flag(sbi, SBI_NEED_CP);
        }
submit_io:
        if (is_read_io(bio_op(bio)))
                trace_f2fs_submit_read_bio(sbi->sb, type, bio);
        else
                trace_f2fs_submit_write_bio(sbi->sb, type, bio);

        iostat_update_submit_ctx(bio, type);
        submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type)
{
        __submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
        unsigned int io_flag, fua_flag, meta_flag;

        if (fio->type == DATA)
                io_flag = sbi->data_io_flag;
        else if (fio->type == NODE)
                io_flag = sbi->node_io_flag;
        else
                return;

        fua_flag = io_flag & temp_mask;
        meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

        /*
         * data/node io flag bits per temp:
         *      REQ_META     |      REQ_FUA      |
         *    5 |    4 |   3 |    2 |    1 |   0 |
         * Cold | Warm | Hot | Cold | Warm | Hot |
         */
        if ((1 << fio->temp) & meta_flag)
                fio->op_flags |= REQ_META;
        if ((1 << fio->temp) & fua_flag)
                fio->op_flags |= REQ_FUA;
}

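/*
 * Worked example (hypothetical sysfs setting): with NR_TEMP_TYPE == 3
 * and sbi->data_io_flag == 0x09 (0b001001), fua_flag is 0b001 and
 * meta_flag is 0b001, so hot data writes get both REQ_FUA and
 * REQ_META, while warm and cold data writes get neither.
 */
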
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        __attach_io_flag(fio);
        bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

        if (is_read_io(fio->op))
                trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
        else
                trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

        __submit_bio(io->sbi, io->bio, fio->type);
        io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
                                                struct page *page, nid_t ino)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (!bio)
                return false;

        if (!inode && !page && !ino)
                return true;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *target = bvec->bv_page;

                if (fscrypt_is_bounce_page(target)) {
                        target = fscrypt_pagecache_page(target);
                        if (IS_ERR(target))
                                continue;
                }
                if (f2fs_is_compressed_page(target)) {
                        target = f2fs_compress_control_page(target);
                        if (IS_ERR(target))
                                continue;
                }

                if (inode && inode == target->mapping->host)
                        return true;
                if (page && page == target)
                        return true;
                if (ino && ino == ino_of_node(target))
                        return true;
        }

        return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
                                enum page_type type, enum temp_type temp)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                io->fio.op = REQ_OP_WRITE;
                io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
                if (!test_opt(sbi, NOBARRIER))
                        io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type, bool force)
{
        enum temp_type temp;
        bool ret = true;

        for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
                if (!force)     {
                        enum page_type btype = PAGE_TYPE_OF_BIO(type);
                        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

                        down_read(&io->io_rwsem);
                        ret = __has_merged_page(io->bio, inode, page, ino);
                        up_read(&io->io_rwsem);
                }
                if (ret)
                        __f2fs_submit_merged_write(sbi, type, temp);

                /* TODO: use HOT temp only for meta pages now. */
                if (type >= META)
                        break;
        }
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
        __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
                                nid_t ino, enum page_type type)
{
        __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
        f2fs_submit_merged_write(sbi, DATA);
        f2fs_submit_merged_write(sbi, NODE);
        f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located at the given block address.
 * The caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        fio->is_por ? META_POR : (__is_meta_io(fio) ?
                        META_GENERIC : DATA_GENERIC_ENHANCE)))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);

        /* Allocate a new bio */
        bio = __bio_alloc(fio, 1);

        f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                               fio->page->index, fio, GFP_NOIO);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }

        if (fio->io_wbc && !is_read_io(fio->op))
                wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

        __attach_io_flag(fio);
        bio_set_op_attrs(bio, fio->op, fio->op_flags);

        inc_page_count(fio->sbi, is_read_io(fio->op) ?
                        __read_io_type(page): WB_DATA_TYPE(fio->page));

        __submit_bio(fio->sbi, bio, fio->type);
        return 0;
}

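/*
 * Usage sketch (hypothetical caller, error handling elided): reading
 * one block synchronously through f2fs_submit_page_bio() above.
 *
 *      struct f2fs_io_info fio = {
 *              .sbi = F2FS_I_SB(inode),
 *              .type = DATA,
 *              .op = REQ_OP_READ,
 *              .op_flags = 0,
 *              .page = page,
 *              .new_blkaddr = blkaddr,
 *      };
 *
 *      err = f2fs_submit_page_bio(&fio);
 */
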
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                block_t last_blkaddr, block_t cur_blkaddr)
{
        if (unlikely(sbi->max_io_bytes &&
                        bio->bi_iter.bi_size >= sbi->max_io_bytes))
                return false;
        if (last_blkaddr + 1 != cur_blkaddr)
                return false;
        return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
                                                struct f2fs_io_info *fio)
{
        if (io->fio.op != fio->op)
                return false;
        return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
                                        struct f2fs_bio_info *io,
                                        struct f2fs_io_info *fio,
                                        block_t last_blkaddr,
                                        block_t cur_blkaddr)
{
        if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
                unsigned int filled_blocks =
                                F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
                unsigned int io_size = F2FS_IO_SIZE(sbi);
                unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

                /* the IO in the bio is aligned but the remaining vector space is not enough */
                if (!(filled_blocks % io_size) && left_vecs < io_size)
                        return false;
        }
        if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
                return false;
        return io_type_is_mergeable(io, fio);
}

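/*
 * Taken together, the three helpers above permit merging a page into a
 * pending bio only when (a) the bio has not grown past
 * sbi->max_io_bytes, (b) the new block is physically contiguous with
 * the last one and lands on the same device, and (c) the op and
 * op_flags of the pending IO match, with an extra vector-space check
 * when IO alignment is enabled.
 */
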
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
                                struct page *page, enum temp_type temp)
{
        struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
        struct bio_entry *be;

        be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
        be->bio = bio;
        bio_get(bio);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
                f2fs_bug_on(sbi, 1);

        down_write(&io->bio_list_lock);
        list_add_tail(&be->list, &io->bio_list);
        up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
        list_del(&be->list);
        kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
                                                        struct page *page)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum temp_type temp;
        bool found = false;
        int ret = -EAGAIN;

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (be->bio != *bio)
                                continue;

                        found = true;

                        f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
                                                            *fio->last_block,
                                                            fio->new_blkaddr));
                        if (f2fs_crypt_mergeable_bio(*bio,
                                        fio->page->mapping->host,
                                        fio->page->index, fio) &&
                            bio_add_page(*bio, page, PAGE_SIZE, 0) ==
                                        PAGE_SIZE) {
                                ret = 0;
                                break;
                        }

                        /* page can't be merged into bio; submit the bio */
                        del_bio_entry(be);
                        __submit_bio(sbi, *bio, DATA);
                        break;
                }
                up_write(&io->bio_list_lock);
        }

        if (ret) {
                bio_put(*bio);
                *bio = NULL;
        }

        return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
                                        struct bio **bio, struct page *page)
{
        enum temp_type temp;
        bool found = false;
        struct bio *target = bio ? *bio : NULL;

        f2fs_bug_on(sbi, !target && !page);

        for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
                struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
                struct list_head *head = &io->bio_list;
                struct bio_entry *be;

                if (list_empty(head))
                        continue;

                down_read(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found)
                                break;
                }
                up_read(&io->bio_list_lock);

                if (!found)
                        continue;

                found = false;

                down_write(&io->bio_list_lock);
                list_for_each_entry(be, head, list) {
                        if (target)
                                found = (target == be->bio);
                        else
                                found = __has_merged_page(be->bio, NULL,
                                                                page, 0);
                        if (found) {
                                target = be->bio;
                                del_bio_entry(be);
                                break;
                        }
                }
                up_write(&io->bio_list_lock);
        }

        if (found)
                __submit_bio(sbi, target, DATA);
        if (bio && *bio) {
                bio_put(*bio);
                *bio = NULL;
        }
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio = *fio->bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
                return -EFSCORRUPTED;

        trace_f2fs_submit_page_bio(page, fio);

        if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
                                                fio->new_blkaddr))
                f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
        if (!bio) {
                bio = __bio_alloc(fio, BIO_MAX_VECS);
                __attach_io_flag(fio);
                f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                                       fio->page->index, fio, GFP_NOIO);
                bio_set_op_attrs(bio, fio->op, fio->op_flags);

                add_bio_entry(fio->sbi, bio, page, fio->temp);
        } else {
                if (add_ipu_page(fio, &bio, page))
                        goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

        inc_page_count(fio->sbi, WB_DATA_TYPE(page));

        *fio->last_block = fio->new_blkaddr;
        *fio->bio = bio;

        return 0;
}

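/*
 * Note: f2fs_merge_page_bio() above keeps its bios on the per-temp
 * bio_list (see add_bio_entry()/add_ipu_page()), so in-place-update
 * writes can later be located and flushed by
 * f2fs_submit_merged_ipu_write().  f2fs_submit_page_write() below, in
 * contrast, merges into the per-type write_io[] bio under io_rwsem.
 */
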
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
        struct page *bio_page;

        f2fs_bug_on(sbi, is_read_io(fio->op));

        down_write(&io->io_rwsem);
next:
        if (fio->in_list) {
                spin_lock(&io->io_lock);
                if (list_empty(&io->io_list)) {
                        spin_unlock(&io->io_lock);
                        goto out;
                }
                fio = list_first_entry(&io->io_list,
                                                struct f2fs_io_info, list);
                list_del(&fio->list);
                spin_unlock(&io->io_lock);
        }

        verify_fio_blkaddr(fio);

        if (fio->encrypted_page)
                bio_page = fio->encrypted_page;
        else if (fio->compressed_page)
                bio_page = fio->compressed_page;
        else
                bio_page = fio->page;

        /* set submitted = true as a return value */
        fio->submitted = true;

        inc_page_count(sbi, WB_DATA_TYPE(bio_page));

        if (io->bio &&
            (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
                              fio->new_blkaddr) ||
             !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio)))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                if (F2FS_IO_ALIGNED(sbi) &&
                                (fio->type == DATA || fio->type == NODE) &&
                                fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
                        dec_page_count(sbi, WB_DATA_TYPE(bio_page));
                        fio->retry = true;
                        goto skip;
                }
                io->bio = __bio_alloc(fio, BIO_MAX_VECS);
                f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio, GFP_NOIO);
                io->fio = *fio;
        }

        if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        if (fio->io_wbc)
                wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

        io->last_block_in_bio = fio->new_blkaddr;

        trace_f2fs_submit_page_write(fio->page, fio);
skip:
        if (fio->in_list)
                goto next;
out:
        if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
                                !f2fs_is_checkpoint_ready(sbi))
                __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                                      unsigned nr_pages, unsigned op_flag,
                                      pgoff_t first_idx, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
        struct bio_post_read_ctx *ctx = NULL;
        unsigned int post_read_steps = 0;

        bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
                               bio_max_segs(nr_pages), &f2fs_bioset);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

        f2fs_target_device(sbi, blkaddr, bio);
        bio->bi_end_io = f2fs_read_end_io;
        bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= STEP_DECRYPT;

        if (f2fs_need_verity(inode, first_idx))
                post_read_steps |= STEP_VERITY;

        /*
         * STEP_DECOMPRESS is handled specially, since a compressed file might
         * contain both compressed and uncompressed clusters.  We'll allocate a
         * bio_post_read_ctx if the file is compressed, but the caller is
         * responsible for enabling STEP_DECOMPRESS if it's actually needed.
         */

        if (post_read_steps || f2fs_compressed_file(inode)) {
                /* Due to the mempool, this never fails. */
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                ctx->bio = bio;
                ctx->sbi = sbi;
                ctx->enabled_steps = post_read_steps;
                ctx->fs_blkaddr = blkaddr;
                bio->bi_private = ctx;
        }
        iostat_alloc_and_bind_ctx(sbi, bio, ctx);

        return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
                                 block_t blkaddr, int op_flags, bool for_write)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;

        bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
                                        page->index, for_write);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        ClearPageError(page);
        inc_page_count(sbi, F2FS_RD_DATA);
        f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
        __submit_bio(sbi, bio, DATA);
        return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn = F2FS_NODE(dn->node_page);
        __le32 *addr_array;
        int base = 0;

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
        __set_data_blkaddr(dn);
        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
        dn->data_blkaddr = blkaddr;
        f2fs_set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
}

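/*
 * Usage sketch (hypothetical caller, locking elided): rewriting the
 * block address of one data block through the helpers above.  dn must
 * carry a valid node page, so it is looked up first.
 *
 *      set_new_dnode(&dn, inode, NULL, NULL, 0);
 *      err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *      if (!err) {
 *              f2fs_update_data_blkaddr(&dn, new_blkaddr);
 *              f2fs_put_dnode(&dn);
 *      }
 */
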
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        int err;

        if (!count)
                return 0;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

        trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
                                                dn->ofs_in_node, count);

        f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

        for (; count > 0; dn->ofs_in_node++) {
                block_t blkaddr = f2fs_data_blkaddr(dn);

                if (blkaddr == NULL_ADDR) {
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                        count--;
                }
        }

        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;
        return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
        unsigned int ofs_in_node = dn->ofs_in_node;
        int ret;

        ret = f2fs_reserve_new_blocks(dn, 1);
        dn->ofs_in_node = ofs_in_node;
        return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = f2fs_reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
        struct extent_info ei = {0, };
        struct inode *inode = dn->inode;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn->data_blkaddr = ei.blk + index - ei.fofs;
                return 0;
        }

        return f2fs_reserve_block(dn, index);
}

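/*
 * Worked example (hypothetical numbers) for the extent-cache hit in
 * f2fs_get_block() above: with a cached extent { fofs = 100,
 * blk = 5000, len = 8 }, a lookup at index 103 resolves to
 * data_blkaddr = 5000 + 103 - 100 = 5003 without touching the node
 * page.  On a miss, f2fs_reserve_block() walks the dnode instead.
 */
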
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                                                int op_flags, bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, };
        int err;

        page = f2fs_grab_cache_page(mapping, index, for_write);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ)) {
                        err = -EFSCORRUPTED;
                        goto put_err;
                }
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_err;
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                err = -ENOENT;
                goto put_err;
        }
        if (dn.data_blkaddr != NEW_ADDR &&
                        !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                                dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
                err = -EFSCORRUPTED;
                goto put_err;
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page was allocated but could not be written, because
         * its new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr remains NEW_ADDR.
         * See f2fs_add_link -> f2fs_get_new_data_page ->
         * f2fs_init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
                                                op_flags, for_write);
        if (err)
                goto put_err;
        return page;

put_err:
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = f2fs_get_read_data_page(inode, index, 0, false);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}

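/*
 * Note: f2fs_find_data_page() above returns the page unlocked and is
 * satisfied once the read has completed, whereas
 * f2fs_get_lock_data_page() below returns with the page locked and
 * rechecks page->mapping in case the page was truncated while the
 * read was in flight.
 */
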
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
                                                        bool for_write)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = f2fs_get_read_data_page(inode, index, 0, for_write);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page) {
                /*
                 * before exiting, we should make sure ipage will be released
                 * if any error occurs.
                 */
                f2fs_put_page(ipage, 1);
                return ERR_PTR(-ENOMEM);
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!PageUptodate(page))
                        SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                /* if ipage exists, blkaddr should be NEW_ADDR */
                f2fs_bug_on(F2FS_I_SB(inode), ipage);
                page = f2fs_get_lock_data_page(inode, index, true);
                if (IS_ERR(page))
                        return page;
        }
got_it:
        if (new_i_size && i_size_read(inode) <
                                ((loff_t)(index + 1) << PAGE_SHIFT))
                f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
        return page;
}

static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        block_t old_blkaddr;
        blkcnt_t count = 1;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;

        err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
        if (err)
                return err;

        dn->data_blkaddr = f2fs_data_blkaddr(dn);
        if (dn->data_blkaddr != NULL_ADDR)
                goto alloc;

        if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
                return err;

alloc:
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        old_blkaddr = dn->data_blkaddr;
        f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                                &sum, seg_type, NULL);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
                invalidate_mapping_pages(META_MAPPING(sbi),
                                        old_blkaddr, old_blkaddr);
                f2fs_invalidate_compress_page(sbi, old_blkaddr);
        }
        f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

        /*
         * i_size will be updated by direct_IO. Otherwise, we'd get stale
         * data from an unwritten block via dio_read.
         */
        return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        struct f2fs_map_blocks map;
        int flag;
        int err = 0;
        bool direct_io = iocb->ki_flags & IOCB_DIRECT;

        map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
        map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
        if (map.m_len > map.m_lblk)
                map.m_len -= map.m_lblk;
        else
                map.m_len = 0;

        map.m_next_pgofs = NULL;
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = true;

        if (direct_io) {
                map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
                flag = f2fs_force_buffered_io(inode, iocb, from) ?
                                        F2FS_GET_BLOCK_PRE_AIO :
                                        F2FS_GET_BLOCK_PRE_DIO;
                goto map_blocks;
        }
        if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        if (f2fs_has_inline_data(inode))
                return err;

        flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
        err = f2fs_map_blocks(inode, &map, 1, flag);
        if (map.m_len > 0 && err == -ENOSPC) {
                if (!direct_io)
                        set_inode_flag(inode, FI_NO_PREALLOC);
                err = 0;
        }
        return err;
}

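/*
 * Worked example (hypothetical numbers, 4KB blocks) for the length
 * computation above: with ki_pos = 5000 and 10000 bytes to write,
 * m_lblk = F2FS_BLK_ALIGN(5000) = 2 and F2FS_BYTES_TO_BLK(15000) = 3,
 * so m_len = 3 - 2 = 1: only the single fully-overwritten block is
 * preallocated, and the partially-covered first and last blocks are
 * left to the write path.
 */
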
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                if (lock)
                        down_read(&sbi->node_change);
                else
                        up_read(&sbi->node_change);
        } else {
                if (lock)
                        f2fs_lock_op(sbi);
                else
                        f2fs_unlock_op(sbi);
        }
}

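/*
 * Usage sketch (hypothetical caller) for f2fs_map_blocks() below:
 * looking up, without allocating, the physical location of a range of
 * logical blocks.
 *
 *      struct f2fs_map_blocks map = {
 *              .m_lblk = start_blk,
 *              .m_len = nr_blks,
 *              .m_seg_type = NO_CHECK_TYPE,
 *              .m_may_create = false,
 *      };
 *
 *      err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *
 * On success with F2FS_MAP_MAPPED set in map.m_flags, map.m_pblk and
 * map.m_len describe the physical extent.
 */
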
/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps contiguous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, int flag)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
        pgoff_t pgofs, end_offset, end;
        int err = 0, ofs = 1;
        unsigned int ofs_in_node, last_ofs_in_node;
        blkcnt_t prealloc;
        struct extent_info ei = {0, };
        block_t blkaddr;
        unsigned int start_pgofs;

        if (!maxblocks)
                return 0;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
        end = pgofs + maxblocks;

        if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
                                                        map->m_may_create)
                        goto next_dnode;

                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                if (map->m_next_extent)
                        *map->m_next_extent = pgofs + map->m_len;

                /* for hardware encryption, but also to avoid potential issues in the future */
                if (flag == F2FS_GET_BLOCK_DIO)
                        f2fs_wait_on_block_writeback_range(inode,
                                                map->m_pblk, map->m_len);
                goto out;
        }

next_dnode:
        if (map->m_may_create)
                f2fs_do_map_lock(sbi, flag, true);

        /* When reading holes, we need the node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
                if (flag == F2FS_GET_BLOCK_BMAP)
                        map->m_pblk = 0;

                if (err == -ENOENT) {
1511                         /*
1512                          * There is one exceptional case where read_node_page()
1513                          * may return -ENOENT because the filesystem has been
1514                          * shut down or hit cp_error, so force the error
1515                          * number to EIO in that case.
1516                          */
1517                         if (map->m_may_create &&
1518                                 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1519                                 f2fs_cp_error(sbi))) {
1520                                 err = -EIO;
1521                                 goto unlock_out;
1522                         }
1523
1524                         err = 0;
1525                         if (map->m_next_pgofs)
1526                                 *map->m_next_pgofs =
1527                                         f2fs_get_next_page_offset(&dn, pgofs);
1528                         if (map->m_next_extent)
1529                                 *map->m_next_extent =
1530                                         f2fs_get_next_page_offset(&dn, pgofs);
1531                 }
1532                 goto unlock_out;
1533         }
1534
1535         start_pgofs = pgofs;
1536         prealloc = 0;
1537         last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1538         end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1539
1540 next_block:
1541         blkaddr = f2fs_data_blkaddr(&dn);
1542
1543         if (__is_valid_data_blkaddr(blkaddr) &&
1544                 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1545                 err = -EFSCORRUPTED;
1546                 goto sync_out;
1547         }
1548
1549         if (__is_valid_data_blkaddr(blkaddr)) {
1550                 /* use out-of-place update for direct IO under LFS mode */
1551                 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1552                                                         map->m_may_create) {
1553                         err = __allocate_data_block(&dn, map->m_seg_type);
1554                         if (err)
1555                                 goto sync_out;
1556                         blkaddr = dn.data_blkaddr;
1557                         set_inode_flag(inode, FI_APPEND_WRITE);
1558                 }
1559         } else {
1560                 if (create) {
1561                         if (unlikely(f2fs_cp_error(sbi))) {
1562                                 err = -EIO;
1563                                 goto sync_out;
1564                         }
1565                         if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1566                                 if (blkaddr == NULL_ADDR) {
1567                                         prealloc++;
1568                                         last_ofs_in_node = dn.ofs_in_node;
1569                                 }
1570                         } else {
1571                                 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1572                                         flag != F2FS_GET_BLOCK_DIO);
1573                                 err = __allocate_data_block(&dn,
1574                                                         map->m_seg_type);
1575                                 if (!err)
1576                                         set_inode_flag(inode, FI_APPEND_WRITE);
1577                         }
1578                         if (err)
1579                                 goto sync_out;
1580                         map->m_flags |= F2FS_MAP_NEW;
1581                         blkaddr = dn.data_blkaddr;
1582                 } else {
1583                         if (f2fs_compressed_file(inode) &&
1584                                         f2fs_sanity_check_cluster(&dn) &&
1585                                         (flag != F2FS_GET_BLOCK_FIEMAP ||
1586                                         IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
1587                                 err = -EFSCORRUPTED;
1588                                 goto sync_out;
1589                         }
1590                         if (flag == F2FS_GET_BLOCK_BMAP) {
1591                                 map->m_pblk = 0;
1592                                 goto sync_out;
1593                         }
1594                         if (flag == F2FS_GET_BLOCK_PRECACHE)
1595                                 goto sync_out;
1596                         if (flag == F2FS_GET_BLOCK_FIEMAP &&
1597                                                 blkaddr == NULL_ADDR) {
1598                                 if (map->m_next_pgofs)
1599                                         *map->m_next_pgofs = pgofs + 1;
1600                                 goto sync_out;
1601                         }
1602                         if (flag != F2FS_GET_BLOCK_FIEMAP) {
1603                                 /* for defragment case */
1604                                 if (map->m_next_pgofs)
1605                                         *map->m_next_pgofs = pgofs + 1;
1606                                 goto sync_out;
1607                         }
1608                 }
1609         }
1610
1611         if (flag == F2FS_GET_BLOCK_PRE_AIO)
1612                 goto skip;
1613
1614         if (map->m_len == 0) {
1615                 /* preallocated unwritten block should be mapped for fiemap. */
1616                 if (blkaddr == NEW_ADDR)
1617                         map->m_flags |= F2FS_MAP_UNWRITTEN;
1618                 map->m_flags |= F2FS_MAP_MAPPED;
1619
1620                 map->m_pblk = blkaddr;
1621                 map->m_len = 1;
1622         } else if ((map->m_pblk != NEW_ADDR &&
1623                         blkaddr == (map->m_pblk + ofs)) ||
1624                         (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1625                         flag == F2FS_GET_BLOCK_PRE_DIO) {
1626                 ofs++;
1627                 map->m_len++;
1628         } else {
1629                 goto sync_out;
1630         }
1631
1632 skip:
1633         dn.ofs_in_node++;
1634         pgofs++;
1635
1636         /* preallocate blocks in batch for one dnode page */
1637         if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1638                         (pgofs == end || dn.ofs_in_node == end_offset)) {
1639
1640                 dn.ofs_in_node = ofs_in_node;
1641                 err = f2fs_reserve_new_blocks(&dn, prealloc);
1642                 if (err)
1643                         goto sync_out;
1644
1645                 map->m_len += dn.ofs_in_node - ofs_in_node;
1646                 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1647                         err = -ENOSPC;
1648                         goto sync_out;
1649                 }
1650                 dn.ofs_in_node = end_offset;
1651         }
1652
1653         if (pgofs >= end)
1654                 goto sync_out;
1655         else if (dn.ofs_in_node < end_offset)
1656                 goto next_block;
1657
1658         if (flag == F2FS_GET_BLOCK_PRECACHE) {
1659                 if (map->m_flags & F2FS_MAP_MAPPED) {
1660                         unsigned int ofs = start_pgofs - map->m_lblk;
1661
1662                         f2fs_update_extent_cache_range(&dn,
1663                                 start_pgofs, map->m_pblk + ofs,
1664                                 map->m_len - ofs);
1665                 }
1666         }
1667
1668         f2fs_put_dnode(&dn);
1669
1670         if (map->m_may_create) {
1671                 f2fs_do_map_lock(sbi, flag, false);
1672                 f2fs_balance_fs(sbi, dn.node_changed);
1673         }
1674         goto next_dnode;
1675
1676 sync_out:
1677
1678         /* for hardware encryption, and to avoid potential issues in the future */
1679         if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1680                 f2fs_wait_on_block_writeback_range(inode,
1681                                                 map->m_pblk, map->m_len);
1682
1683         if (flag == F2FS_GET_BLOCK_PRECACHE) {
1684                 if (map->m_flags & F2FS_MAP_MAPPED) {
1685                         unsigned int ofs = start_pgofs - map->m_lblk;
1686
1687                         f2fs_update_extent_cache_range(&dn,
1688                                 start_pgofs, map->m_pblk + ofs,
1689                                 map->m_len - ofs);
1690                 }
1691                 if (map->m_next_extent)
1692                         *map->m_next_extent = pgofs + 1;
1693         }
1694         f2fs_put_dnode(&dn);
1695 unlock_out:
1696         if (map->m_may_create) {
1697                 f2fs_do_map_lock(sbi, flag, false);
1698                 f2fs_balance_fs(sbi, dn.node_changed);
1699         }
1700 out:
1701         trace_f2fs_map_blocks(inode, map, err);
1702         return err;
1703 }
1704
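/*
 * Returns true iff every block in [pos, pos + len) is already mapped,
 * i.e. the write is a pure overwrite.  The probe below runs with
 * create == 0 and m_may_create == false, so it never allocates;
 * callers can use it, e.g., to take a cheaper path when no block
 * allocation will be needed.
 */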
1705 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1706 {
1707         struct f2fs_map_blocks map;
1708         block_t last_lblk;
1709         int err;
1710
1711         if (pos + len > i_size_read(inode))
1712                 return false;
1713
1714         map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1715         map.m_next_pgofs = NULL;
1716         map.m_next_extent = NULL;
1717         map.m_seg_type = NO_CHECK_TYPE;
1718         map.m_may_create = false;
1719         last_lblk = F2FS_BLK_ALIGN(pos + len);
1720
1721         while (map.m_lblk < last_lblk) {
1722                 map.m_len = last_lblk - map.m_lblk;
1723                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1724                 if (err || map.m_len == 0)
1725                         return false;
1726                 map.m_lblk += map.m_len;
1727         }
1728         return true;
1729 }
1730
1731 static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1732 {
1733         return (bytes >> inode->i_blkbits);
1734 }
1735
1736 static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1737 {
1738         return (blks << inode->i_blkbits);
1739 }
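/*
 * With the default 4KiB block size (i_blkbits == 12), the two helpers
 * above give bytes_to_blks(inode, 8192) == 2 and
 * blks_to_bytes(inode, 2) == 8192.
 */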
1740
1741 static int __get_data_block(struct inode *inode, sector_t iblock,
1742                         struct buffer_head *bh, int create, int flag,
1743                         pgoff_t *next_pgofs, int seg_type, bool may_write)
1744 {
1745         struct f2fs_map_blocks map;
1746         int err;
1747
1748         map.m_lblk = iblock;
1749         map.m_len = bytes_to_blks(inode, bh->b_size);
1750         map.m_next_pgofs = next_pgofs;
1751         map.m_next_extent = NULL;
1752         map.m_seg_type = seg_type;
1753         map.m_may_create = may_write;
1754
1755         err = f2fs_map_blocks(inode, &map, create, flag);
1756         if (!err) {
1757                 map_bh(bh, inode->i_sb, map.m_pblk);
1758                 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1759                 bh->b_size = blks_to_bytes(inode, map.m_len);
1760         }
1761         return err;
1762 }
1763
1764 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1765                         struct buffer_head *bh_result, int create)
1766 {
1767         return __get_data_block(inode, iblock, bh_result, create,
1768                                 F2FS_GET_BLOCK_DIO, NULL,
1769                                 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1770                                 true);
1771 }
1772
1773 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1774                         struct buffer_head *bh_result, int create)
1775 {
1776         return __get_data_block(inode, iblock, bh_result, create,
1777                                 F2FS_GET_BLOCK_DIO, NULL,
1778                                 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1779                                 false);
1780 }
1781
1782 static int f2fs_xattr_fiemap(struct inode *inode,
1783                                 struct fiemap_extent_info *fieinfo)
1784 {
1785         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1786         struct page *page;
1787         struct node_info ni;
1788         __u64 phys = 0, len;
1789         __u32 flags;
1790         nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1791         int err = 0;
1792
1793         if (f2fs_has_inline_xattr(inode)) {
1794                 int offset;
1795
1796                 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1797                                                 inode->i_ino, false);
1798                 if (!page)
1799                         return -ENOMEM;
1800
1801                 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1802                 if (err) {
1803                         f2fs_put_page(page, 1);
1804                         return err;
1805                 }
1806
1807                 phys = blks_to_bytes(inode, ni.blk_addr);
1808                 offset = offsetof(struct f2fs_inode, i_addr) +
1809                                         sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1810                                         get_inline_xattr_addrs(inode));
1811
1812                 phys += offset;
1813                 len = inline_xattr_size(inode);
1814
1815                 f2fs_put_page(page, 1);
1816
1817                 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1818
1819                 if (!xnid)
1820                         flags |= FIEMAP_EXTENT_LAST;
1821
1822                 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1823                 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1824                 if (err || err == 1)
1825                         return err;
1826         }
1827
1828         if (xnid) {
1829                 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1830                 if (!page)
1831                         return -ENOMEM;
1832
1833                 err = f2fs_get_node_info(sbi, xnid, &ni, false);
1834                 if (err) {
1835                         f2fs_put_page(page, 1);
1836                         return err;
1837                 }
1838
1839                 phys = blks_to_bytes(inode, ni.blk_addr);
1840                 len = inode->i_sb->s_blocksize;
1841
1842                 f2fs_put_page(page, 1);
1843
1844                 flags = FIEMAP_EXTENT_LAST;
1845         }
1846
1847         if (phys) {
1848                 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1849                 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1850         }
1851
1852         return (err < 0 ? err : 0);
1853 }
1854
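/*
 * Upper bound on the data blocks addressable from one inode.  With
 * A = ADDRS_PER_INODE(inode), L = ADDRS_PER_BLOCK(inode) and
 * N = NIDS_PER_BLOCK, the function below computes
 *
 *	A + 2*L + 2*N*L + N*N*L
 *
 * i.e. inline addresses plus two direct, two indirect and one
 * double-indirect node block.
 */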
1855 static loff_t max_inode_blocks(struct inode *inode)
1856 {
1857         loff_t result = ADDRS_PER_INODE(inode);
1858         loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1859
1860         /* two direct node blocks */
1861         result += (leaf_count * 2);
1862
1863         /* two indirect node blocks */
1864         leaf_count *= NIDS_PER_BLOCK;
1865         result += (leaf_count * 2);
1866
1867         /* one double indirect node block */
1868         leaf_count *= NIDS_PER_BLOCK;
1869         result += leaf_count;
1870
1871         return result;
1872 }
1873
1874 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1875                 u64 start, u64 len)
1876 {
1877         struct f2fs_map_blocks map;
1878         sector_t start_blk, last_blk;
1879         pgoff_t next_pgofs;
1880         u64 logical = 0, phys = 0, size = 0;
1881         u32 flags = 0;
1882         int ret = 0;
1883         bool compr_cluster = false, compr_appended;
1884         unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1885         unsigned int count_in_cluster = 0;
1886         loff_t maxbytes;
1887
1888         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1889                 ret = f2fs_precache_extents(inode);
1890                 if (ret)
1891                         return ret;
1892         }
1893
1894         ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1895         if (ret)
1896                 return ret;
1897
1898         inode_lock(inode);
1899
1900         maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1901         if (start > maxbytes) {
1902                 ret = -EFBIG;
1903                 goto out;
1904         }
1905
1906         if (len > maxbytes || (maxbytes - len) < start)
1907                 len = maxbytes - start;
1908
1909         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1910                 ret = f2fs_xattr_fiemap(inode, fieinfo);
1911                 goto out;
1912         }
1913
1914         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1915                 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1916                 if (ret != -EAGAIN)
1917                         goto out;
1918         }
1919
1920         if (bytes_to_blks(inode, len) == 0)
1921                 len = blks_to_bytes(inode, 1);
1922
1923         start_blk = bytes_to_blks(inode, start);
1924         last_blk = bytes_to_blks(inode, start + len - 1);
1925
1926 next:
1927         memset(&map, 0, sizeof(map));
1928         map.m_lblk = start_blk;
1929         map.m_len = bytes_to_blks(inode, len);
1930         map.m_next_pgofs = &next_pgofs;
1931         map.m_seg_type = NO_CHECK_TYPE;
1932
1933         if (compr_cluster) {
1934                 map.m_lblk += 1;
1935                 map.m_len = cluster_size - count_in_cluster;
1936         }
1937
1938         ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1939         if (ret)
1940                 goto out;
1941
1942         /* HOLE */
1943         if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1944                 start_blk = next_pgofs;
1945
1946                 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1947                                                 max_inode_blocks(inode)))
1948                         goto prep_next;
1949
1950                 flags |= FIEMAP_EXTENT_LAST;
1951         }
1952
1953         compr_appended = false;
1954         /* In the case of a compressed cluster, append this to the last extent */
1955         if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
1956                         !(map.m_flags & F2FS_MAP_FLAGS))) {
1957                 compr_appended = true;
1958                 goto skip_fill;
1959         }
1960
1961         if (size) {
1962                 flags |= FIEMAP_EXTENT_MERGED;
1963                 if (IS_ENCRYPTED(inode))
1964                         flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1965
1966                 ret = fiemap_fill_next_extent(fieinfo, logical,
1967                                 phys, size, flags);
1968                 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1969                 if (ret)
1970                         goto out;
1971                 size = 0;
1972         }
1973
1974         if (start_blk > last_blk)
1975                 goto out;
1976
1977 skip_fill:
1978         if (map.m_pblk == COMPRESS_ADDR) {
1979                 compr_cluster = true;
1980                 count_in_cluster = 1;
1981         } else if (compr_appended) {
1982                 unsigned int appended_blks = cluster_size -
1983                                                 count_in_cluster + 1;
1984                 size += blks_to_bytes(inode, appended_blks);
1985                 start_blk += appended_blks;
1986                 compr_cluster = false;
1987         } else {
1988                 logical = blks_to_bytes(inode, start_blk);
1989                 phys = __is_valid_data_blkaddr(map.m_pblk) ?
1990                         blks_to_bytes(inode, map.m_pblk) : 0;
1991                 size = blks_to_bytes(inode, map.m_len);
1992                 flags = 0;
1993
1994                 if (compr_cluster) {
1995                         flags = FIEMAP_EXTENT_ENCODED;
1996                         count_in_cluster += map.m_len;
1997                         if (count_in_cluster == cluster_size) {
1998                                 compr_cluster = false;
1999                                 size += blks_to_bytes(inode, 1);
2000                         }
2001                 } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2002                         flags = FIEMAP_EXTENT_UNWRITTEN;
2003                 }
2004
2005                 start_blk += bytes_to_blks(inode, size);
2006         }
2007
2008 prep_next:
2009         cond_resched();
2010         if (fatal_signal_pending(current))
2011                 ret = -EINTR;
2012         else
2013                 goto next;
2014 out:
2015         if (ret == 1)
2016                 ret = 0;
2017
2018         inode_unlock(inode);
2019         return ret;
2020 }
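/*
 * The handler above is reached via the FS_IOC_FIEMAP ioctl.  A minimal
 * userspace sketch (illustrative only, error handling omitted):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 1;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * On return, fm->fm_extents[0] describes the first extent; extents of
 * a compressed cluster come back merged with FIEMAP_EXTENT_ENCODED
 * set, as built in the loop above.
 */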
2021
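/*
 * Verity files may legitimately read beyond i_size: f2fs keeps the
 * fs-verity descriptor and Merkle tree past EOF in the file's own
 * address space, so the limit below is lifted to s_maxbytes while
 * verity is enabled or still being built.
 */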
2022 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2023 {
2024         if (IS_ENABLED(CONFIG_FS_VERITY) &&
2025             (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2026                 return inode->i_sb->s_maxbytes;
2027
2028         return i_size_read(inode);
2029 }
2030
2031 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2032                                         unsigned nr_pages,
2033                                         struct f2fs_map_blocks *map,
2034                                         struct bio **bio_ret,
2035                                         sector_t *last_block_in_bio,
2036                                         bool is_readahead)
2037 {
2038         struct bio *bio = *bio_ret;
2039         const unsigned blocksize = blks_to_bytes(inode, 1);
2040         sector_t block_in_file;
2041         sector_t last_block;
2042         sector_t last_block_in_file;
2043         sector_t block_nr;
2044         int ret = 0;
2045
2046         block_in_file = (sector_t)page_index(page);
2047         last_block = block_in_file + nr_pages;
2048         last_block_in_file = bytes_to_blks(inode,
2049                         f2fs_readpage_limit(inode) + blocksize - 1);
2050         if (last_block > last_block_in_file)
2051                 last_block = last_block_in_file;
2052
2053         /* just zero out the page which is beyond EOF */
2054         if (block_in_file >= last_block)
2055                 goto zero_out;
2056         /*
2057          * Map blocks using the previous result first.
2058          */
2059         if ((map->m_flags & F2FS_MAP_MAPPED) &&
2060                         block_in_file > map->m_lblk &&
2061                         block_in_file < (map->m_lblk + map->m_len))
2062                 goto got_it;
2063
2064         /*
2065          * Then do more f2fs_map_blocks() calls until we are
2066          * done with this page.
2067          */
2068         map->m_lblk = block_in_file;
2069         map->m_len = last_block - block_in_file;
2070
2071         ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2072         if (ret)
2073                 goto out;
2074 got_it:
2075         if ((map->m_flags & F2FS_MAP_MAPPED)) {
2076                 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2077                 SetPageMappedToDisk(page);
2078
2079                 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2080                                         !cleancache_get_page(page))) {
2081                         SetPageUptodate(page);
2082                         goto confused;
2083                 }
2084
2085                 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2086                                                 DATA_GENERIC_ENHANCE_READ)) {
2087                         ret = -EFSCORRUPTED;
2088                         goto out;
2089                 }
2090         } else {
2091 zero_out:
2092                 zero_user_segment(page, 0, PAGE_SIZE);
2093                 if (f2fs_need_verity(inode, page->index) &&
2094                     !fsverity_verify_page(page)) {
2095                         ret = -EIO;
2096                         goto out;
2097                 }
2098                 if (!PageUptodate(page))
2099                         SetPageUptodate(page);
2100                 unlock_page(page);
2101                 goto out;
2102         }
2103
2104         /*
2105          * This page will go to BIO.  Do we need to send this
2106          * BIO off first?
2107          */
2108         if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2109                                        *last_block_in_bio, block_nr) ||
2110                     !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2111 submit_and_realloc:
2112                 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2113                 bio = NULL;
2114         }
2115         if (bio == NULL) {
2116                 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2117                                 is_readahead ? REQ_RAHEAD : 0, page->index,
2118                                 false);
2119                 if (IS_ERR(bio)) {
2120                         ret = PTR_ERR(bio);
2121                         bio = NULL;
2122                         goto out;
2123                 }
2124         }
2125
2126         /*
2127          * If the page is under writeback, we need to wait for
2128          * its completion to see the correct decrypted data.
2129          */
2130         f2fs_wait_on_block_writeback(inode, block_nr);
2131
2132         if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2133                 goto submit_and_realloc;
2134
2135         inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2136         f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2137         ClearPageError(page);
2138         *last_block_in_bio = block_nr;
2139         goto out;
2140 confused:
2141         if (bio) {
2142                 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2143                 bio = NULL;
2144         }
2145         unlock_page(page);
2146 out:
2147         *bio_ret = bio;
2148         return ret;
2149 }
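/*
 * The submit_and_realloc pattern above recurs in
 * f2fs_read_multi_pages() below: whenever the next block cannot merge
 * into the current bio (physically discontiguous, or a different
 * crypto context), the bio is submitted and a fresh one is grabbed
 * starting at the new block.
 */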
2150
2151 #ifdef CONFIG_F2FS_FS_COMPRESSION
2152 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2153                                 unsigned nr_pages, sector_t *last_block_in_bio,
2154                                 bool is_readahead, bool for_write)
2155 {
2156         struct dnode_of_data dn;
2157         struct inode *inode = cc->inode;
2158         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2159         struct bio *bio = *bio_ret;
2160         unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2161         sector_t last_block_in_file;
2162         const unsigned blocksize = blks_to_bytes(inode, 1);
2163         struct decompress_io_ctx *dic = NULL;
2164         struct extent_info ei = {0, };
2165         bool from_dnode = true;
2166         int i;
2167         int ret = 0;
2168
2169         f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2170
2171         last_block_in_file = bytes_to_blks(inode,
2172                         f2fs_readpage_limit(inode) + blocksize - 1);
2173
2174         /* get rid of pages beyond EOF */
2175         for (i = 0; i < cc->cluster_size; i++) {
2176                 struct page *page = cc->rpages[i];
2177
2178                 if (!page)
2179                         continue;
2180                 if ((sector_t)page->index >= last_block_in_file) {
2181                         zero_user_segment(page, 0, PAGE_SIZE);
2182                         if (!PageUptodate(page))
2183                                 SetPageUptodate(page);
2184                 } else if (!PageUptodate(page)) {
2185                         continue;
2186                 }
2187                 unlock_page(page);
2188                 if (for_write)
2189                         put_page(page);
2190                 cc->rpages[i] = NULL;
2191                 cc->nr_rpages--;
2192         }
2193
2194         /* we are done since all pages are beyond EOF */
2195         if (f2fs_cluster_is_empty(cc))
2196                 goto out;
2197
2198         if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
2199                 from_dnode = false;
2200
2201         if (!from_dnode)
2202                 goto skip_reading_dnode;
2203
2204         set_new_dnode(&dn, inode, NULL, NULL, 0);
2205         ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2206         if (ret)
2207                 goto out;
2208
2209         f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2210
2211 skip_reading_dnode:
2212         for (i = 1; i < cc->cluster_size; i++) {
2213                 block_t blkaddr;
2214
2215                 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2216                                         dn.ofs_in_node + i) :
2217                                         ei.blk + i - 1;
2218
2219                 if (!__is_valid_data_blkaddr(blkaddr))
2220                         break;
2221
2222                 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2223                         ret = -EFAULT;
2224                         goto out_put_dnode;
2225                 }
2226                 cc->nr_cpages++;
2227
2228                 if (!from_dnode && i >= ei.c_len)
2229                         break;
2230         }
2231
2232         /* nothing to decompress */
2233         if (cc->nr_cpages == 0) {
2234                 ret = 0;
2235                 goto out_put_dnode;
2236         }
2237
2238         dic = f2fs_alloc_dic(cc);
2239         if (IS_ERR(dic)) {
2240                 ret = PTR_ERR(dic);
2241                 goto out_put_dnode;
2242         }
2243
2244         for (i = 0; i < cc->nr_cpages; i++) {
2245                 struct page *page = dic->cpages[i];
2246                 block_t blkaddr;
2247                 struct bio_post_read_ctx *ctx;
2248
2249                 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2250                                         dn.ofs_in_node + i + 1) :
2251                                         ei.blk + i;
2252
2253                 f2fs_wait_on_block_writeback(inode, blkaddr);
2254
2255                 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2256                         if (atomic_dec_and_test(&dic->remaining_pages))
2257                                 f2fs_decompress_cluster(dic);
2258                         continue;
2259                 }
2260
2261                 if (bio && (!page_is_mergeable(sbi, bio,
2262                                         *last_block_in_bio, blkaddr) ||
2263                     !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2264 submit_and_realloc:
2265                         __submit_bio(sbi, bio, DATA);
2266                         bio = NULL;
2267                 }
2268
2269                 if (!bio) {
2270                         bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2271                                         is_readahead ? REQ_RAHEAD : 0,
2272                                         page->index, for_write);
2273                         if (IS_ERR(bio)) {
2274                                 ret = PTR_ERR(bio);
2275                                 f2fs_decompress_end_io(dic, ret);
2276                                 f2fs_put_dnode(&dn);
2277                                 *bio_ret = NULL;
2278                                 return ret;
2279                         }
2280                 }
2281
2282                 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2283                         goto submit_and_realloc;
2284
2285                 ctx = get_post_read_ctx(bio);
2286                 ctx->enabled_steps |= STEP_DECOMPRESS;
2287                 refcount_inc(&dic->refcnt);
2288
2289                 inc_page_count(sbi, F2FS_RD_DATA);
2290                 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2291                 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2292                 ClearPageError(page);
2293                 *last_block_in_bio = blkaddr;
2294         }
2295
2296         if (from_dnode)
2297                 f2fs_put_dnode(&dn);
2298
2299         *bio_ret = bio;
2300         return 0;
2301
2302 out_put_dnode:
2303         if (from_dnode)
2304                 f2fs_put_dnode(&dn);
2305 out:
2306         for (i = 0; i < cc->cluster_size; i++) {
2307                 if (cc->rpages[i]) {
2308                         ClearPageUptodate(cc->rpages[i]);
2309                         ClearPageError(cc->rpages[i]);
2310                         unlock_page(cc->rpages[i]);
2311                 }
2312         }
2313         *bio_ret = bio;
2314         return ret;
2315 }
2316 #endif
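/*
 * Lifetime note for f2fs_read_multi_pages() above: each compressed
 * page queued into a bio takes a reference on the decompress_io_ctx
 * via refcount_inc(&dic->refcnt) and tags the bio with
 * STEP_DECOMPRESS, while pages satisfied from the compressed page
 * cache drop dic->remaining_pages directly; either way, decompression
 * runs once the last compressed page of the cluster is available.
 */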
2317
2318 /*
2319  * This function was originally taken from fs/mpage.c, and customized for f2fs.
2320  * The major change stems from block_size == page_size being the default in f2fs.
2321  */
2322 static int f2fs_mpage_readpages(struct inode *inode,
2323                 struct readahead_control *rac, struct page *page)
2324 {
2325         struct bio *bio = NULL;
2326         sector_t last_block_in_bio = 0;
2327         struct f2fs_map_blocks map;
2328 #ifdef CONFIG_F2FS_FS_COMPRESSION
2329         struct compress_ctx cc = {
2330                 .inode = inode,
2331                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2332                 .cluster_size = F2FS_I(inode)->i_cluster_size,
2333                 .cluster_idx = NULL_CLUSTER,
2334                 .rpages = NULL,
2335                 .cpages = NULL,
2336                 .nr_rpages = 0,
2337                 .nr_cpages = 0,
2338         };
2339         pgoff_t nc_cluster_idx = NULL_CLUSTER;
2340 #endif
2341         unsigned nr_pages = rac ? readahead_count(rac) : 1;
2342         unsigned max_nr_pages = nr_pages;
2343         int ret = 0;
2344
2345         map.m_pblk = 0;
2346         map.m_lblk = 0;
2347         map.m_len = 0;
2348         map.m_flags = 0;
2349         map.m_next_pgofs = NULL;
2350         map.m_next_extent = NULL;
2351         map.m_seg_type = NO_CHECK_TYPE;
2352         map.m_may_create = false;
2353
2354         for (; nr_pages; nr_pages--) {
2355                 if (rac) {
2356                         page = readahead_page(rac);
2357                         prefetchw(&page->flags);
2358                 }
2359
2360 #ifdef CONFIG_F2FS_FS_COMPRESSION
2361                 if (f2fs_compressed_file(inode)) {
2362                         /* there are remaining compressed pages, submit them */
2363                         if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2364                                 ret = f2fs_read_multi_pages(&cc, &bio,
2365                                                         max_nr_pages,
2366                                                         &last_block_in_bio,
2367                                                         rac != NULL, false);
2368                                 f2fs_destroy_compress_ctx(&cc, false);
2369                                 if (ret)
2370                                         goto set_error_page;
2371                         }
2372                         if (cc.cluster_idx == NULL_CLUSTER) {
2373                                 if (nc_cluster_idx ==
2374                                         page->index >> cc.log_cluster_size) {
2375                                         goto read_single_page;
2376                                 }
2377
2378                                 ret = f2fs_is_compressed_cluster(inode, page->index);
2379                                 if (ret < 0)
2380                                         goto set_error_page;
2381                                 else if (!ret) {
2382                                         nc_cluster_idx =
2383                                                 page->index >> cc.log_cluster_size;
2384                                         goto read_single_page;
2385                                 }
2386
2387                                 nc_cluster_idx = NULL_CLUSTER;
2388                         }
2389                         ret = f2fs_init_compress_ctx(&cc);
2390                         if (ret)
2391                                 goto set_error_page;
2392
2393                         f2fs_compress_ctx_add_page(&cc, page);
2394
2395                         goto next_page;
2396                 }
2397 read_single_page:
2398 #endif
2399
2400                 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2401                                         &bio, &last_block_in_bio, rac);
2402                 if (ret) {
2403 #ifdef CONFIG_F2FS_FS_COMPRESSION
2404 set_error_page:
2405 #endif
2406                         SetPageError(page);
2407                         zero_user_segment(page, 0, PAGE_SIZE);
2408                         unlock_page(page);
2409                 }
2410 #ifdef CONFIG_F2FS_FS_COMPRESSION
2411 next_page:
2412 #endif
2413                 if (rac)
2414                         put_page(page);
2415
2416 #ifdef CONFIG_F2FS_FS_COMPRESSION
2417                 if (f2fs_compressed_file(inode)) {
2418                         /* last page */
2419                         if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2420                                 ret = f2fs_read_multi_pages(&cc, &bio,
2421                                                         max_nr_pages,
2422                                                         &last_block_in_bio,
2423                                                         rac != NULL, false);
2424                                 f2fs_destroy_compress_ctx(&cc, false);
2425                         }
2426                 }
2427 #endif
2428         }
2429         if (bio)
2430                 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2431         return ret;
2432 }
2433
2434 static int f2fs_read_data_page(struct file *file, struct page *page)
2435 {
2436         struct inode *inode = page_file_mapping(page)->host;
2437         int ret = -EAGAIN;
2438
2439         trace_f2fs_readpage(page, DATA);
2440
2441         if (!f2fs_is_compress_backend_ready(inode)) {
2442                 unlock_page(page);
2443                 return -EOPNOTSUPP;
2444         }
2445
2446         /* If the file has inline data, try to read it directly */
2447         if (f2fs_has_inline_data(inode))
2448                 ret = f2fs_read_inline_data(inode, page);
2449         if (ret == -EAGAIN)
2450                 ret = f2fs_mpage_readpages(inode, NULL, page);
2451         return ret;
2452 }
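/*
 * Note the -EAGAIN convention above: f2fs_read_inline_data() is tried
 * first, and only if it reports -EAGAIN (the data is not inline after
 * all) does the read fall through to the block-mapped path in
 * f2fs_mpage_readpages().
 */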
2453
2454 static void f2fs_readahead(struct readahead_control *rac)
2455 {
2456         struct inode *inode = rac->mapping->host;
2457
2458         trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2459
2460         if (!f2fs_is_compress_backend_ready(inode))
2461                 return;
2462
2463         /* If the file has inline data, skip readpages */
2464         if (f2fs_has_inline_data(inode))
2465                 return;
2466
2467         f2fs_mpage_readpages(inode, rac, NULL);
2468 }
2469
2470 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2471 {
2472         struct inode *inode = fio->page->mapping->host;
2473         struct page *mpage, *page;
2474         gfp_t gfp_flags = GFP_NOFS;
2475
2476         if (!f2fs_encrypted_file(inode))
2477                 return 0;
2478
2479         page = fio->compressed_page ? fio->compressed_page : fio->page;
2480
2481         /* wait for GCed page writeback via META_MAPPING */
2482         f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2483
2484         if (fscrypt_inode_uses_inline_crypto(inode))
2485                 return 0;
2486
2487 retry_encrypt:
2488         fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2489                                         PAGE_SIZE, 0, gfp_flags);
2490         if (IS_ERR(fio->encrypted_page)) {
2491                 /* flush pending IOs and wait for a while in the ENOMEM case */
2492                 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2493                         f2fs_flush_merged_writes(fio->sbi);
2494                         congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2495                         gfp_flags |= __GFP_NOFAIL;
2496                         goto retry_encrypt;
2497                 }
2498                 return PTR_ERR(fio->encrypted_page);
2499         }
2500
2501         mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2502         if (mpage) {
2503                 if (PageUptodate(mpage))
2504                         memcpy(page_address(mpage),
2505                                 page_address(fio->encrypted_page), PAGE_SIZE);
2506                 f2fs_put_page(mpage, 1);
2507         }
2508         return 0;
2509 }
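/*
 * Note the retry above: on -ENOMEM, pending merged writes are flushed
 * and, after a congestion wait, encryption is retried with
 * __GFP_NOFAIL so writeback can always make forward progress.  The
 * copy into the META_MAPPING page keeps any cached copy of the old
 * block (as used by GC) coherent with the newly encrypted data.
 */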
2510
2511 static inline bool check_inplace_update_policy(struct inode *inode,
2512                                 struct f2fs_io_info *fio)
2513 {
2514         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2515         unsigned int policy = SM_I(sbi)->ipu_policy;
2516
2517         if (policy & (0x1 << F2FS_IPU_FORCE))
2518                 return true;
2519         if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2520                 return true;
2521         if (policy & (0x1 << F2FS_IPU_UTIL) &&
2522                         utilization(sbi) > SM_I(sbi)->min_ipu_util)
2523                 return true;
2524         if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2525                         utilization(sbi) > SM_I(sbi)->min_ipu_util)
2526                 return true;
2527
2528         /*
2529          * IPU for rewriting async pages
2530          */
2531         if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2532                         fio && fio->op == REQ_OP_WRITE &&
2533                         !(fio->op_flags & REQ_SYNC) &&
2534                         !IS_ENCRYPTED(inode))
2535                 return true;
2536
2537         /* this is only set during fdatasync */
2538         if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2539                         is_inode_flag_set(inode, FI_NEED_IPU))
2540                 return true;
2541
2542         if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2543                         !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2544                 return true;
2545
2546         return false;
2547 }
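/*
 * ipu_policy above is a bitmask, so several of the F2FS_IPU_* triggers
 * can be armed at once.  It is tunable at runtime through sysfs; as a
 * sketch (the device name is a placeholder):
 *
 *	echo <mask> > /sys/fs/f2fs/<dev>/ipu_policy
 *
 * where <mask> ORs the desired F2FS_IPU_* bits, and min_ipu_util
 * bounds the utilization-based triggers.
 */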
2548
2549 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2550 {
2551         /* swap file is migrating in aligned write mode */
2552         if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2553                 return false;
2554
2555         if (f2fs_is_pinned_file(inode))
2556                 return true;
2557
2558         /* if this is a cold file, we should overwrite to avoid fragmentation */
2559         if (file_is_cold(inode))
2560                 return true;
2561
2562         return check_inplace_update_policy(inode, fio);
2563 }
2564
2565 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2566 {
2567         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2568
2569         /* The cases below were already checked when setting the flag. */
2570         if (f2fs_is_pinned_file(inode))
2571                 return false;
2572         if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2573                 return true;
2574         if (f2fs_lfs_mode(sbi))
2575                 return true;
2576         if (S_ISDIR(inode->i_mode))
2577                 return true;
2578         if (IS_NOQUOTA(inode))
2579                 return true;
2580         if (f2fs_is_atomic_file(inode))
2581                 return true;
2582
2583         /* swap file is migrating in aligned write mode */
2584         if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2585                 return true;
2586
2587         if (fio) {
2588                 if (page_private_gcing(fio->page))
2589                         return true;
2590                 if (page_private_dummy(fio->page))
2591                         return true;
2592                 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2593                         f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2594                         return true;
2595         }
2596         return false;
2597 }
2598
2599 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2600 {
2601         struct inode *inode = fio->page->mapping->host;
2602
2603         if (f2fs_should_update_outplace(inode, fio))
2604                 return false;
2605
2606         return f2fs_should_update_inplace(inode, fio);
2607 }
2608
2609 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2610 {
2611         struct page *page = fio->page;
2612         struct inode *inode = page->mapping->host;
2613         struct dnode_of_data dn;
2614         struct extent_info ei = {0, };
2615         struct node_info ni;
2616         bool ipu_force = false;
2617         int err = 0;
2618
2619         set_new_dnode(&dn, inode, NULL, NULL, 0);
2620         if (need_inplace_update(fio) &&
2621                         f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2622                 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2623
2624                 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2625                                                 DATA_GENERIC_ENHANCE))
2626                         return -EFSCORRUPTED;
2627
2628                 ipu_force = true;
2629                 fio->need_lock = LOCK_DONE;
2630                 goto got_it;
2631         }
2632
2633         /* Avoid deadlock between page->lock and f2fs_lock_op */
2634         if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2635                 return -EAGAIN;
2636
2637         err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2638         if (err)
2639                 goto out;
2640
2641         fio->old_blkaddr = dn.data_blkaddr;
2642
2643         /* This page is already truncated */
2644         if (fio->old_blkaddr == NULL_ADDR) {
2645                 ClearPageUptodate(page);
2646                 clear_page_private_gcing(page);
2647                 goto out_writepage;
2648         }
2649 got_it:
2650         if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2651                 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2652                                                 DATA_GENERIC_ENHANCE)) {
2653                 err = -EFSCORRUPTED;
2654                 goto out_writepage;
2655         }
2656         /*
2657          * If the current allocation needs SSR,
2658          * in-place writes are preferable for the updated data.
2659          */
2660         if (ipu_force ||
2661                 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2662                                         need_inplace_update(fio))) {
2663                 err = f2fs_encrypt_one_page(fio);
2664                 if (err)
2665                         goto out_writepage;
2666
2667                 set_page_writeback(page);
2668                 ClearPageError(page);
2669                 f2fs_put_dnode(&dn);
2670                 if (fio->need_lock == LOCK_REQ)
2671                         f2fs_unlock_op(fio->sbi);
2672                 err = f2fs_inplace_write_data(fio);
2673                 if (err) {
2674                         if (fscrypt_inode_uses_fs_layer_crypto(inode))
2675                                 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2676                         if (PageWriteback(page))
2677                                 end_page_writeback(page);
2678                 } else {
2679                         set_inode_flag(inode, FI_UPDATE_WRITE);
2680                 }
2681                 trace_f2fs_do_write_data_page(fio->page, IPU);
2682                 return err;
2683         }
2684
2685         if (fio->need_lock == LOCK_RETRY) {
2686                 if (!f2fs_trylock_op(fio->sbi)) {
2687                         err = -EAGAIN;
2688                         goto out_writepage;
2689                 }
2690                 fio->need_lock = LOCK_REQ;
2691         }
2692
2693         err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
2694         if (err)
2695                 goto out_writepage;
2696
2697         fio->version = ni.version;
2698
2699         err = f2fs_encrypt_one_page(fio);
2700         if (err)
2701                 goto out_writepage;
2702
2703         set_page_writeback(page);
2704         ClearPageError(page);
2705
2706         if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2707                 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2708
2709         /* LFS mode write path */
2710         f2fs_outplace_write_data(&dn, fio);
2711         trace_f2fs_do_write_data_page(page, OPU);
2712         set_inode_flag(inode, FI_APPEND_WRITE);
2713         if (page->index == 0)
2714                 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2715 out_writepage:
2716         f2fs_put_dnode(&dn);
2717 out:
2718         if (fio->need_lock == LOCK_REQ)
2719                 f2fs_unlock_op(fio->sbi);
2720         return err;
2721 }
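/*
 * To summarize the two paths above: IPU rewrites fio->old_blkaddr in
 * place (traced as IPU), while the LFS-style path allocates a new
 * block via f2fs_outplace_write_data(), sets FI_APPEND_WRITE and is
 * traced as OPU.
 */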
2722
2723 int f2fs_write_single_data_page(struct page *page, int *submitted,
2724                                 struct bio **bio,
2725                                 sector_t *last_block,
2726                                 struct writeback_control *wbc,
2727                                 enum iostat_type io_type,
2728                                 int compr_blocks,
2729                                 bool allow_balance)
2730 {
2731         struct inode *inode = page->mapping->host;
2732         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2733         loff_t i_size = i_size_read(inode);
2734         const pgoff_t end_index = ((unsigned long long)i_size)
2735                                                         >> PAGE_SHIFT;
2736         loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2737         unsigned offset = 0;
2738         bool need_balance_fs = false;
2739         int err = 0;
2740         struct f2fs_io_info fio = {
2741                 .sbi = sbi,
2742                 .ino = inode->i_ino,
2743                 .type = DATA,
2744                 .op = REQ_OP_WRITE,
2745                 .op_flags = wbc_to_write_flags(wbc),
2746                 .old_blkaddr = NULL_ADDR,
2747                 .page = page,
2748                 .encrypted_page = NULL,
2749                 .submitted = false,
2750                 .compr_blocks = compr_blocks,
2751                 .need_lock = LOCK_RETRY,
2752                 .io_type = io_type,
2753                 .io_wbc = wbc,
2754                 .bio = bio,
2755                 .last_block = last_block,
2756         };
2757
2758         trace_f2fs_writepage(page, DATA);
2759
2760         /* we should bypass data pages to let the kworker jobs proceed */
2761         if (unlikely(f2fs_cp_error(sbi))) {
2762                 mapping_set_error(page->mapping, -EIO);
2763                 /*
2764                  * don't drop any dirty dentry pages, to keep the latest
2765                  * directory structure.
2766                  */
2767                 if (S_ISDIR(inode->i_mode) &&
2768                                 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2769                         goto redirty_out;
2770                 goto out;
2771         }
2772
2773         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2774                 goto redirty_out;
2775
2776         if (page->index < end_index ||
2777                         f2fs_verity_in_progress(inode) ||
2778                         compr_blocks)
2779                 goto write;
2780
2781         /*
2782          * If the offset is beyond the file size,
2783          * this page does not have to be written to disk.
2784          */
2785         offset = i_size & (PAGE_SIZE - 1);
2786         if ((page->index >= end_index + 1) || !offset)
2787                 goto out;
2788
2789         zero_user_segment(page, offset, PAGE_SIZE);
2790 write:
2791         if (f2fs_is_drop_cache(inode))
2792                 goto out;
2793         /* we should not write the 0'th page, which holds the journal header */
2794         if (f2fs_is_volatile_file(inode) && (!page->index ||
2795                         (!wbc->for_reclaim &&
2796                         f2fs_available_free_memory(sbi, BASE_CHECK))))
2797                 goto redirty_out;
2798
2799         /* Dentry/quota blocks are controlled by checkpoint */
2800         if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2801                 /*
2802                  * We need to wait for node_write to avoid block allocation during
2803                  * checkpoint. This can only happen to quota writes, which could
2804                  * otherwise cause the discard race condition below.
2805                  */
2806                 if (IS_NOQUOTA(inode))
2807                         down_read(&sbi->node_write);
2808
2809                 fio.need_lock = LOCK_DONE;
2810                 err = f2fs_do_write_data_page(&fio);
2811
2812                 if (IS_NOQUOTA(inode))
2813                         up_read(&sbi->node_write);
2814
2815                 goto done;
2816         }
2817
2818         if (!wbc->for_reclaim)
2819                 need_balance_fs = true;
2820         else if (has_not_enough_free_secs(sbi, 0, 0))
2821                 goto redirty_out;
2822         else
2823                 set_inode_flag(inode, FI_HOT_DATA);
2824
2825         err = -EAGAIN;
2826         if (f2fs_has_inline_data(inode)) {
2827                 err = f2fs_write_inline_data(inode, page);
2828                 if (!err)
2829                         goto out;
2830         }
2831
2832         if (err == -EAGAIN) {
2833                 err = f2fs_do_write_data_page(&fio);
2834                 if (err == -EAGAIN) {
2835                         fio.need_lock = LOCK_REQ;
2836                         err = f2fs_do_write_data_page(&fio);
2837                 }
2838         }
2839
2840         if (err) {
2841                 file_set_keep_isize(inode);
2842         } else {
2843                 spin_lock(&F2FS_I(inode)->i_size_lock);
2844                 if (F2FS_I(inode)->last_disk_size < psize)
2845                         F2FS_I(inode)->last_disk_size = psize;
2846                 spin_unlock(&F2FS_I(inode)->i_size_lock);
2847         }
2848
2849 done:
2850         if (err && err != -ENOENT)
2851                 goto redirty_out;
2852
2853 out:
2854         inode_dec_dirty_pages(inode);
2855         if (err) {
2856                 ClearPageUptodate(page);
2857                 clear_page_private_gcing(page);
2858         }
2859
2860         if (wbc->for_reclaim) {
2861                 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2862                 clear_inode_flag(inode, FI_HOT_DATA);
2863                 f2fs_remove_dirty_inode(inode);
2864                 submitted = NULL;
2865         }
2866         unlock_page(page);
2867         if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2868                         !F2FS_I(inode)->wb_task && allow_balance)
2869                 f2fs_balance_fs(sbi, need_balance_fs);
2870
2871         if (unlikely(f2fs_cp_error(sbi))) {
2872                 f2fs_submit_merged_write(sbi, DATA);
2873                 if (bio && *bio)
2874                         f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2875                 submitted = NULL;
2876         }
2877
2878         if (submitted)
2879                 *submitted = fio.submitted ? 1 : 0;
2880
2881         return 0;
2882
2883 redirty_out:
2884         redirty_page_for_writepage(wbc, page);
2885         /*
2886          * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2887          * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2888          * file_write_and_wait_range() will then see EIO, which is critical for
2889          * reporting an atomic_write failure to the user via fsync()'s return.
2890          */
2891         if (!err || wbc->for_reclaim)
2892                 return AOP_WRITEPAGE_ACTIVATE;
2893         unlock_page(page);
2894         return err;
2895 }
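/*
 * When AOP_WRITEPAGE_ACTIVATE is returned above, the page is left
 * locked; the VM unlocks it and moves it back to the active list
 * rather than treating the redirty as an error.
 */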
2896
2897 static int f2fs_write_data_page(struct page *page,
2898                                         struct writeback_control *wbc)
2899 {
2900 #ifdef CONFIG_F2FS_FS_COMPRESSION
2901         struct inode *inode = page->mapping->host;
2902
2903         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2904                 goto out;
2905
2906         if (f2fs_compressed_file(inode)) {
2907                 if (f2fs_is_compressed_cluster(inode, page->index)) {
2908                         redirty_page_for_writepage(wbc, page);
2909                         return AOP_WRITEPAGE_ACTIVATE;
2910                 }
2911         }
2912 out:
2913 #endif
2914
2915         return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2916                                                 wbc, FS_DATA_IO, 0, true);
2917 }
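/*
 * Note: for a compressed file, a page that sits inside a compressed cluster
 * cannot be written out on its own, so it is redirtied and
 * AOP_WRITEPAGE_ACTIVATE is returned; the whole cluster is then assembled
 * and written by f2fs_write_cache_pages() -> f2fs_write_multi_pages().
 */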
2918
2919 /*
2920  * This function was copied from write_cache_pages() in mm/page-writeback.c.
2921  * The major change is that it writes cold data pages separately from
2922  * warm/hot data pages.
2923  */
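/*
 * As in write_cache_pages(), WB_SYNC_ALL and tagged_writepages passes first
 * mark the range's dirty pages with PAGECACHE_TAG_TOWRITE via
 * tag_pages_for_writeback() and then look pages up by that tag, so pages
 * dirtied after the tagging pass are not picked up again; this avoids
 * livelock when someone keeps redirtying pages during writeback.
 */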
2924 static int f2fs_write_cache_pages(struct address_space *mapping,
2925                                         struct writeback_control *wbc,
2926                                         enum iostat_type io_type)
2927 {
2928         int ret = 0;
2929         int done = 0, retry = 0;
2930         struct pagevec pvec;
2931         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2932         struct bio *bio = NULL;
2933         sector_t last_block;
2934 #ifdef CONFIG_F2FS_FS_COMPRESSION
2935         struct inode *inode = mapping->host;
2936         struct compress_ctx cc = {
2937                 .inode = inode,
2938                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2939                 .cluster_size = F2FS_I(inode)->i_cluster_size,
2940                 .cluster_idx = NULL_CLUSTER,
2941                 .rpages = NULL,
2942                 .nr_rpages = 0,
2943                 .cpages = NULL,
2944                 .rbuf = NULL,
2945                 .cbuf = NULL,
2946                 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2947                 .private = NULL,
2948         };
2949 #endif
2950         int nr_pages;
2951         pgoff_t index;
2952         pgoff_t end;            /* Inclusive */
2953         pgoff_t done_index;
2954         int range_whole = 0;
2955         xa_mark_t tag;
2956         int nwritten = 0;
2957         int submitted = 0;
2958         int i;
2959
2960         pagevec_init(&pvec);
2961
2962         if (get_dirty_pages(mapping->host) <=
2963                                 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2964                 set_inode_flag(mapping->host, FI_HOT_DATA);
2965         else
2966                 clear_inode_flag(mapping->host, FI_HOT_DATA);
2967
2968         if (wbc->range_cyclic) {
2969                 index = mapping->writeback_index; /* prev offset */
2970                 end = -1;
2971         } else {
2972                 index = wbc->range_start >> PAGE_SHIFT;
2973                 end = wbc->range_end >> PAGE_SHIFT;
2974                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2975                         range_whole = 1;
2976         }
2977         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2978                 tag = PAGECACHE_TAG_TOWRITE;
2979         else
2980                 tag = PAGECACHE_TAG_DIRTY;
2981 retry:
2982         retry = 0;
2983         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2984                 tag_pages_for_writeback(mapping, index, end);
2985         done_index = index;
2986         while (!done && !retry && (index <= end)) {
2987                 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2988                                 tag);
2989                 if (nr_pages == 0)
2990                         break;
2991
2992                 for (i = 0; i < nr_pages; i++) {
2993                         struct page *page = pvec.pages[i];
2994                         bool need_readd;
2995 readd:
2996                         need_readd = false;
2997 #ifdef CONFIG_F2FS_FS_COMPRESSION
2998                         if (f2fs_compressed_file(inode)) {
2999                                 ret = f2fs_init_compress_ctx(&cc);
3000                                 if (ret) {
3001                                         done = 1;
3002                                         break;
3003                                 }
3004
3005                                 if (!f2fs_cluster_can_merge_page(&cc,
3006                                                                 page->index)) {
3007                                         ret = f2fs_write_multi_pages(&cc,
3008                                                 &submitted, wbc, io_type);
3009                                         if (!ret)
3010                                                 need_readd = true;
3011                                         goto result;
3012                                 }
3013
3014                                 if (unlikely(f2fs_cp_error(sbi)))
3015                                         goto lock_page;
3016
3017                                 if (f2fs_cluster_is_empty(&cc)) {
3018                                         void *fsdata = NULL;
3019                                         struct page *pagep;
3020                                         int ret2;
3021
3022                                         ret2 = f2fs_prepare_compress_overwrite(
3023                                                         inode, &pagep,
3024                                                         page->index, &fsdata);
3025                                         if (ret2 < 0) {
3026                                                 ret = ret2;
3027                                                 done = 1;
3028                                                 break;
3029                                         } else if (ret2 &&
3030                                                 !f2fs_compress_write_end(inode,
3031                                                                 fsdata, page->index,
3032                                                                 1)) {
3033                                                 retry = 1;
3034                                                 break;
3035                                         }
3036                                 } else {
3037                                         goto lock_page;
3038                                 }
3039                         }
3040 #endif
3041                         /* give priority to WB_SYNC threads */
3042                         if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3043                                         wbc->sync_mode == WB_SYNC_NONE) {
3044                                 done = 1;
3045                                 break;
3046                         }
3047 #ifdef CONFIG_F2FS_FS_COMPRESSION
3048 lock_page:
3049 #endif
3050                         done_index = page->index;
3051 retry_write:
3052                         lock_page(page);
3053
3054                         if (unlikely(page->mapping != mapping)) {
3055 continue_unlock:
3056                                 unlock_page(page);
3057                                 continue;
3058                         }
3059
3060                         if (!PageDirty(page)) {
3061                                 /* someone wrote it for us */
3062                                 goto continue_unlock;
3063                         }
3064
3065                         if (PageWriteback(page)) {
3066                                 if (wbc->sync_mode != WB_SYNC_NONE)
3067                                         f2fs_wait_on_page_writeback(page,
3068                                                         DATA, true, true);
3069                                 else
3070                                         goto continue_unlock;
3071                         }
3072
3073                         if (!clear_page_dirty_for_io(page))
3074                                 goto continue_unlock;
3075
3076 #ifdef CONFIG_F2FS_FS_COMPRESSION
3077                         if (f2fs_compressed_file(inode)) {
3078                                 get_page(page);
3079                                 f2fs_compress_ctx_add_page(&cc, page);
3080                                 continue;
3081                         }
3082 #endif
3083                         ret = f2fs_write_single_data_page(page, &submitted,
3084                                         &bio, &last_block, wbc, io_type,
3085                                         0, true);
3086                         if (ret == AOP_WRITEPAGE_ACTIVATE)
3087                                 unlock_page(page);
3088 #ifdef CONFIG_F2FS_FS_COMPRESSION
3089 result:
3090 #endif
3091                         nwritten += submitted;
3092                         wbc->nr_to_write -= submitted;
3093
3094                         if (unlikely(ret)) {
3095                                 /*
3096                                  * keep nr_to_write, since vfs uses this to
3097                                  * get # of written pages.
3098                                  */
3099                                 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3100                                         ret = 0;
3101                                         goto next;
3102                                 } else if (ret == -EAGAIN) {
3103                                         ret = 0;
3104                                         if (wbc->sync_mode == WB_SYNC_ALL) {
3105                                                 cond_resched();
3106                                                 congestion_wait(BLK_RW_ASYNC,
3107                                                         DEFAULT_IO_TIMEOUT);
3108                                                 goto retry_write;
3109                                         }
3110                                         goto next;
3111                                 }
3112                                 done_index = page->index + 1;
3113                                 done = 1;
3114                                 break;
3115                         }
3116
3117                         if (wbc->nr_to_write <= 0 &&
3118                                         wbc->sync_mode == WB_SYNC_NONE) {
3119                                 done = 1;
3120                                 break;
3121                         }
3122 next:
3123                         if (need_readd)
3124                                 goto readd;
3125                 }
3126                 pagevec_release(&pvec);
3127                 cond_resched();
3128         }
3129 #ifdef CONFIG_F2FS_FS_COMPRESSION
3130         /* flush remaining pages in the compress cluster */
3131         if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3132                 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3133                 nwritten += submitted;
3134                 wbc->nr_to_write -= submitted;
3135                 if (ret) {
3136                         done = 1;
3137                         retry = 0;
3138                 }
3139         }
3140         if (f2fs_compressed_file(inode))
3141                 f2fs_destroy_compress_ctx(&cc, false);
3142 #endif
3143         if (retry) {
3144                 index = 0;
3145                 end = -1;
3146                 goto retry;
3147         }
3148         if (wbc->range_cyclic && !done)
3149                 done_index = 0;
3150         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3151                 mapping->writeback_index = done_index;
3152
3153         if (nwritten)
3154                 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3155                                                                 NULL, 0, DATA);
3156         /* submit any cached bio of IPU (in-place update) write */
3157         if (bio)
3158                 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3159
3160         return ret;
3161 }
3162
3163 static inline bool __should_serialize_io(struct inode *inode,
3164                                         struct writeback_control *wbc)
3165 {
3166         /* to avoid deadlock in path of data flush */
3167         if (F2FS_I(inode)->wb_task)
3168                 return false;
3169
3170         if (!S_ISREG(inode->i_mode))
3171                 return false;
3172         if (IS_NOQUOTA(inode))
3173                 return false;
3174
3175         if (f2fs_need_compress_data(inode))
3176                 return true;
3177         if (wbc->sync_mode != WB_SYNC_ALL)
3178                 return true;
3179         if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3180                 return true;
3181         return false;
3182 }
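/*
 * Rationale (a reading of the checks above, not normative): serializing
 * writepages under sbi->writepages keeps block allocation for compressed
 * files and for large non-sync flushes mostly sequential on disk, instead
 * of letting concurrent flushers interleave their allocations; the wb_task
 * check bails out first so the data-flush thread cannot deadlock on the
 * same mutex.
 */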
3183
3184 static int __f2fs_write_data_pages(struct address_space *mapping,
3185                                                 struct writeback_control *wbc,
3186                                                 enum iostat_type io_type)
3187 {
3188         struct inode *inode = mapping->host;
3189         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3190         struct blk_plug plug;
3191         int ret;
3192         bool locked = false;
3193
3194         /* deal with chardevs and other special files */
3195         if (!mapping->a_ops->writepage)
3196                 return 0;
3197
3198         /* skip writing if there are no dirty pages in this inode */
3199         if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3200                 return 0;
3201
3202         /* during POR (power-on recovery), we don't need to trigger writepage at all. */
3203         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3204                 goto skip_write;
3205
3206         if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3207                         wbc->sync_mode == WB_SYNC_NONE &&
3208                         get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3209                         f2fs_available_free_memory(sbi, DIRTY_DENTS))
3210                 goto skip_write;
3211
3212         /* skip writing during file defragmentation */
3213         if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3214                 goto skip_write;
3215
3216         trace_f2fs_writepages(mapping->host, wbc, DATA);
3217
3218         /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3219         if (wbc->sync_mode == WB_SYNC_ALL)
3220                 atomic_inc(&sbi->wb_sync_req[DATA]);
3221         else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3222                 /* to avoid potential deadlock */
3223                 if (current->plug)
3224                         blk_finish_plug(current->plug);
3225                 goto skip_write;
3226         }
3227
3228         if (__should_serialize_io(inode, wbc)) {
3229                 mutex_lock(&sbi->writepages);
3230                 locked = true;
3231         }
3232
3233         blk_start_plug(&plug);
3234         ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3235         blk_finish_plug(&plug);
3236
3237         if (locked)
3238                 mutex_unlock(&sbi->writepages);
3239
3240         if (wbc->sync_mode == WB_SYNC_ALL)
3241                 atomic_dec(&sbi->wb_sync_req[DATA]);
3242         /*
3243          * If some pages were truncated, we cannot rely on mapping->host
3244          * to detect pending bios.
3245          */
3246
3247         f2fs_remove_dirty_inode(inode);
3248         return ret;
3249
3250 skip_write:
3251         wbc->pages_skipped += get_dirty_pages(inode);
3252         trace_f2fs_writepages(mapping->host, wbc, DATA);
3253         return 0;
3254 }
3255
3256 static int f2fs_write_data_pages(struct address_space *mapping,
3257                             struct writeback_control *wbc)
3258 {
3259         struct inode *inode = mapping->host;
3260
3261         return __f2fs_write_data_pages(mapping, wbc,
3262                         F2FS_I(inode)->cp_task == current ?
3263                         FS_CP_DATA_IO : FS_DATA_IO);
3264 }
3265
3266 static void f2fs_write_failed(struct inode *inode, loff_t to)
3267 {
3268         loff_t i_size = i_size_read(inode);
3269
3270         if (IS_NOQUOTA(inode))
3271                 return;
3272
3273         /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3274         if (to > i_size && !f2fs_verity_in_progress(inode)) {
3275                 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3276                 filemap_invalidate_lock(inode->i_mapping);
3277
3278                 truncate_pagecache(inode, i_size);
3279                 f2fs_truncate_blocks(inode, i_size, true);
3280
3281                 filemap_invalidate_unlock(inode->i_mapping);
3282                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3283         }
3284 }
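/*
 * When a write that extended the file fails, the helper above truncates
 * both the page cache and any preallocated blocks back to i_size so no
 * orphaned blocks survive the error path; the GC rwsem and the invalidate
 * lock fence off concurrent GC and page-cache users while truncating.
 */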
3285
3286 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3287                         struct page *page, loff_t pos, unsigned len,
3288                         block_t *blk_addr, bool *node_changed)
3289 {
3290         struct inode *inode = page->mapping->host;
3291         pgoff_t index = page->index;
3292         struct dnode_of_data dn;
3293         struct page *ipage;
3294         bool locked = false;
3295         struct extent_info ei = {0, };
3296         int err = 0;
3297         int flag;
3298
3299         /*
3300          * we already allocated all the blocks, so we don't need to get
3301          * the block addresses when there is no need to fill the page.
3302          */
3303         if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3304             !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3305             !f2fs_verity_in_progress(inode))
3306                 return 0;
3307
3308         /* f2fs_lock_op avoids a race between checkpoint writes and convert_inline_page */
3309         if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3310                 flag = F2FS_GET_BLOCK_DEFAULT;
3311         else
3312                 flag = F2FS_GET_BLOCK_PRE_AIO;
3313
3314         if (f2fs_has_inline_data(inode) ||
3315                         (pos & PAGE_MASK) >= i_size_read(inode)) {
3316                 f2fs_do_map_lock(sbi, flag, true);
3317                 locked = true;
3318         }
3319
3320 restart:
3321         /* check inline_data */
3322         ipage = f2fs_get_node_page(sbi, inode->i_ino);
3323         if (IS_ERR(ipage)) {
3324                 err = PTR_ERR(ipage);
3325                 goto unlock_out;
3326         }
3327
3328         set_new_dnode(&dn, inode, ipage, ipage, 0);
3329
3330         if (f2fs_has_inline_data(inode)) {
3331                 if (pos + len <= MAX_INLINE_DATA(inode)) {
3332                         f2fs_do_read_inline_data(page, ipage);
3333                         set_inode_flag(inode, FI_DATA_EXIST);
3334                         if (inode->i_nlink)
3335                                 set_page_private_inline(ipage);
3336                 } else {
3337                         err = f2fs_convert_inline_page(&dn, page);
3338                         if (err)
3339                                 goto out;
3340                         if (dn.data_blkaddr == NULL_ADDR)
3341                                 err = f2fs_get_block(&dn, index);
3342                 }
3343         } else if (locked) {
3344                 err = f2fs_get_block(&dn, index);
3345         } else {
3346                 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3347                         dn.data_blkaddr = ei.blk + index - ei.fofs;
3348                 } else {
3349                         /* hole case */
3350                         err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3351                         if (err || dn.data_blkaddr == NULL_ADDR) {
3352                                 f2fs_put_dnode(&dn);
3353                                 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3354                                                                 true);
3355                                 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3356                                 locked = true;
3357                                 goto restart;
3358                         }
3359                 }
3360         }
3361
3362         /* convert_inline_page can set node_changed */
3363         *blk_addr = dn.data_blkaddr;
3364         *node_changed = dn.node_changed;
3365 out:
3366         f2fs_put_dnode(&dn);
3367 unlock_out:
3368         if (locked)
3369                 f2fs_do_map_lock(sbi, flag, false);
3370         return err;
3371 }
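/*
 * The restart dance above is deliberate: the fast path resolves the block
 * address locklessly through the extent cache, and only when a hole is
 * found without the map lock held does it take the PRE_AIO map lock and
 * retry, so hole filling cannot race with a concurrent allocation.
 */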
3372
3373 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3374                 loff_t pos, unsigned len, unsigned flags,
3375                 struct page **pagep, void **fsdata)
3376 {
3377         struct inode *inode = mapping->host;
3378         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3379         struct page *page = NULL;
3380         pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3381         bool need_balance = false, drop_atomic = false;
3382         block_t blkaddr = NULL_ADDR;
3383         int err = 0;
3384
3385         trace_f2fs_write_begin(inode, pos, len, flags);
3386
3387         if (!f2fs_is_checkpoint_ready(sbi)) {
3388                 err = -ENOSPC;
3389                 goto fail;
3390         }
3391
3392         if ((f2fs_is_atomic_file(inode) &&
3393                         !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3394                         is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3395                 err = -ENOMEM;
3396                 drop_atomic = true;
3397                 goto fail;
3398         }
3399
3400         /*
3401          * We should check this here to avoid a deadlock between the inode page
3402          * and page #0. The locking rule for inline_data conversion should be:
3403          * lock_page(page #0) -> lock_page(inode_page)
3404          */
3405         if (index != 0) {
3406                 err = f2fs_convert_inline_inode(inode);
3407                 if (err)
3408                         goto fail;
3409         }
3410
3411 #ifdef CONFIG_F2FS_FS_COMPRESSION
3412         if (f2fs_compressed_file(inode)) {
3413                 int ret;
3414
3415                 *fsdata = NULL;
3416
3417                 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3418                         goto repeat;
3419
3420                 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3421                                                         index, fsdata);
3422                 if (ret < 0) {
3423                         err = ret;
3424                         goto fail;
3425                 } else if (ret) {
3426                         return 0;
3427                 }
3428         }
3429 #endif
3430
3431 repeat:
3432         /*
3433          * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3434          * wait_for_stable_page. We will wait on that below, under our IO control.
3435          */
3436         page = f2fs_pagecache_get_page(mapping, index,
3437                                 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3438         if (!page) {
3439                 err = -ENOMEM;
3440                 goto fail;
3441         }
3442
3443         /* TODO: cluster can be compressed due to race with .writepage */
3444
3445         *pagep = page;
3446
3447         err = prepare_write_begin(sbi, page, pos, len,
3448                                         &blkaddr, &need_balance);
3449         if (err)
3450                 goto fail;
3451
3452         if (need_balance && !IS_NOQUOTA(inode) &&
3453                         has_not_enough_free_secs(sbi, 0, 0)) {
3454                 unlock_page(page);
3455                 f2fs_balance_fs(sbi, true);
3456                 lock_page(page);
3457                 if (page->mapping != mapping) {
3458                         /* The page got truncated from under us */
3459                         f2fs_put_page(page, 1);
3460                         goto repeat;
3461                 }
3462         }
3463
3464         f2fs_wait_on_page_writeback(page, DATA, false, true);
3465
3466         if (len == PAGE_SIZE || PageUptodate(page))
3467                 return 0;
3468
3469         if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3470             !f2fs_verity_in_progress(inode)) {
3471                 zero_user_segment(page, len, PAGE_SIZE);
3472                 return 0;
3473         }
3474
3475         if (blkaddr == NEW_ADDR) {
3476                 zero_user_segment(page, 0, PAGE_SIZE);
3477                 SetPageUptodate(page);
3478         } else {
3479                 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3480                                 DATA_GENERIC_ENHANCE_READ)) {
3481                         err = -EFSCORRUPTED;
3482                         goto fail;
3483                 }
3484                 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3485                 if (err)
3486                         goto fail;
3487
3488                 lock_page(page);
3489                 if (unlikely(page->mapping != mapping)) {
3490                         f2fs_put_page(page, 1);
3491                         goto repeat;
3492                 }
3493                 if (unlikely(!PageUptodate(page))) {
3494                         err = -EIO;
3495                         goto fail;
3496                 }
3497         }
3498         return 0;
3499
3500 fail:
3501         f2fs_put_page(page, 1);
3502         f2fs_write_failed(inode, pos + len);
3503         if (drop_atomic)
3504                 f2fs_drop_inmem_pages_all(sbi, false);
3505         return err;
3506 }
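/*
 * Note the "repeat" retries above: whenever the page lock is dropped (for
 * f2fs_balance_fs() or while reading the page in) and page->mapping no
 * longer matches, the page was truncated out from under us and a fresh
 * page must be grabbed for this index.
 */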
3507
3508 static int f2fs_write_end(struct file *file,
3509                         struct address_space *mapping,
3510                         loff_t pos, unsigned len, unsigned copied,
3511                         struct page *page, void *fsdata)
3512 {
3513         struct inode *inode = page->mapping->host;
3514
3515         trace_f2fs_write_end(inode, pos, len, copied);
3516
3517         /*
3518          * This should come from len == PAGE_SIZE, and we expect copied
3519          * to be PAGE_SIZE. Otherwise, we treat it as a zero-length copy and
3520          * let generic_perform_write() try to copy the data again via copied=0.
3521          */
3522         if (!PageUptodate(page)) {
3523                 if (unlikely(copied != len))
3524                         copied = 0;
3525                 else
3526                         SetPageUptodate(page);
3527         }
3528
3529 #ifdef CONFIG_F2FS_FS_COMPRESSION
3530         /* overwrite compressed file */
3531         if (f2fs_compressed_file(inode) && fsdata) {
3532                 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3533                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3534
3535                 if (pos + copied > i_size_read(inode) &&
3536                                 !f2fs_verity_in_progress(inode))
3537                         f2fs_i_size_write(inode, pos + copied);
3538                 return copied;
3539         }
3540 #endif
3541
3542         if (!copied)
3543                 goto unlock_out;
3544
3545         set_page_dirty(page);
3546
3547         if (pos + copied > i_size_read(inode) &&
3548             !f2fs_verity_in_progress(inode))
3549                 f2fs_i_size_write(inode, pos + copied);
3550 unlock_out:
3551         f2fs_put_page(page, 1);
3552         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3553         return copied;
3554 }
3555
3556 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3557                            loff_t offset)
3558 {
3559         unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3560         unsigned blkbits = i_blkbits;
3561         unsigned blocksize_mask = (1 << blkbits) - 1;
3562         unsigned long align = offset | iov_iter_alignment(iter);
3563         struct block_device *bdev = inode->i_sb->s_bdev;
3564
3565         if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3566                 return 1;
3567
3568         if (align & blocksize_mask) {
3569                 if (bdev)
3570                         blkbits = blksize_bits(bdev_logical_block_size(bdev));
3571                 blocksize_mask = (1 << blkbits) - 1;
3572                 if (align & blocksize_mask)
3573                         return -EINVAL;
3574                 return 1;
3575         }
3576         return 0;
3577 }
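/*
 * Return values of check_direct_IO(): 0 lets direct I/O proceed, 1 makes
 * the caller return 0 bytes so the VFS falls back to buffered I/O, and a
 * negative value is a hard error. A worked example (assuming a 4KB
 * filesystem block and a device with 512B logical blocks): a 512B-aligned
 * offset/buffer fails the 4KB mask but passes the 512B device mask, so 1
 * is returned and the request falls back to buffered I/O rather than
 * failing with -EINVAL.
 */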
3578
3579 static void f2fs_dio_end_io(struct bio *bio)
3580 {
3581         struct f2fs_private_dio *dio = bio->bi_private;
3582
3583         dec_page_count(F2FS_I_SB(dio->inode),
3584                         dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3585
3586         bio->bi_private = dio->orig_private;
3587         bio->bi_end_io = dio->orig_end_io;
3588
3589         kfree(dio);
3590
3591         bio_endio(bio);
3592 }
3593
3594 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3595                                                         loff_t file_offset)
3596 {
3597         struct f2fs_private_dio *dio;
3598         bool write = (bio_op(bio) == REQ_OP_WRITE);
3599
3600         dio = f2fs_kzalloc(F2FS_I_SB(inode),
3601                         sizeof(struct f2fs_private_dio), GFP_NOFS);
3602         if (!dio)
3603                 goto out;
3604
3605         dio->inode = inode;
3606         dio->orig_end_io = bio->bi_end_io;
3607         dio->orig_private = bio->bi_private;
3608         dio->write = write;
3609
3610         bio->bi_end_io = f2fs_dio_end_io;
3611         bio->bi_private = dio;
3612
3613         inc_page_count(F2FS_I_SB(inode),
3614                         write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3615
3616         submit_bio(bio);
3617         return;
3618 out:
3619         bio->bi_status = BLK_STS_IOERR;
3620         bio_endio(bio);
3621 }
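/*
 * The submit hook above interposes on the bio: it saves the original
 * bi_end_io/bi_private in an f2fs_private_dio and bumps the in-flight
 * F2FS_DIO_READ/WRITE page count; f2fs_dio_end_io() restores the originals
 * and drops the count on completion, letting the rest of f2fs observe
 * outstanding direct I/O.
 */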
3622
3623 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3624 {
3625         struct address_space *mapping = iocb->ki_filp->f_mapping;
3626         struct inode *inode = mapping->host;
3627         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3628         struct f2fs_inode_info *fi = F2FS_I(inode);
3629         size_t count = iov_iter_count(iter);
3630         loff_t offset = iocb->ki_pos;
3631         int rw = iov_iter_rw(iter);
3632         int err;
3633         enum rw_hint hint = iocb->ki_hint;
3634         int whint_mode = F2FS_OPTION(sbi).whint_mode;
3635         bool do_opu;
3636
3637         err = check_direct_IO(inode, iter, offset);
3638         if (err)
3639                 return err < 0 ? err : 0;
3640
3641         if (f2fs_force_buffered_io(inode, iocb, iter))
3642                 return 0;
3643
3644         do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
3645
3646         trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3647
3648         if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3649                 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3650
3651         if (iocb->ki_flags & IOCB_NOWAIT) {
3652                 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3653                         iocb->ki_hint = hint;
3654                         err = -EAGAIN;
3655                         goto out;
3656                 }
3657                 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3658                         up_read(&fi->i_gc_rwsem[rw]);
3659                         iocb->ki_hint = hint;
3660                         err = -EAGAIN;
3661                         goto out;
3662                 }
3663         } else {
3664                 down_read(&fi->i_gc_rwsem[rw]);
3665                 if (do_opu)
3666                         down_read(&fi->i_gc_rwsem[READ]);
3667         }
3668
3669         err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3670                         iter, rw == WRITE ? get_data_block_dio_write :
3671                         get_data_block_dio, NULL, f2fs_dio_submit_bio,
3672                         rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3673                         DIO_SKIP_HOLES);
3674
3675         if (do_opu)
3676                 up_read(&fi->i_gc_rwsem[READ]);
3677
3678         up_read(&fi->i_gc_rwsem[rw]);
3679
3680         if (rw == WRITE) {
3681                 if (whint_mode == WHINT_MODE_OFF)
3682                         iocb->ki_hint = hint;
3683                 if (err > 0) {
3684                         f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3685                                                                         err);
3686                         if (!do_opu)
3687                                 set_inode_flag(inode, FI_UPDATE_WRITE);
3688                 } else if (err == -EIOCBQUEUED) {
3689                         f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3690                                                 count - iov_iter_count(iter));
3691                 } else if (err < 0) {
3692                         f2fs_write_failed(inode, offset + count);
3693                 }
3694         } else {
3695                 if (err > 0)
3696                         f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3697                 else if (err == -EIOCBQUEUED)
3698                         f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3699                                                 count - iov_iter_count(iter));
3700         }
3701
3702 out:
3703         trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3704
3705         return err;
3706 }
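/*
 * A note on do_opu above: in LFS mode every write is an out-of-place
 * update, so a direct write takes the READ gc rwsem as well, presumably to
 * keep GC from migrating the old blocks mid-I/O. With IOCB_NOWAIT both
 * rwsems are only trylocked and -EAGAIN is returned rather than blocking.
 */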
3707
3708 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3709                                                         unsigned int length)
3710 {
3711         struct inode *inode = page->mapping->host;
3712         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3713
3714         if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3715                 (offset % PAGE_SIZE || length != PAGE_SIZE))
3716                 return;
3717
3718         if (PageDirty(page)) {
3719                 if (inode->i_ino == F2FS_META_INO(sbi)) {
3720                         dec_page_count(sbi, F2FS_DIRTY_META);
3721                 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3722                         dec_page_count(sbi, F2FS_DIRTY_NODES);
3723                 } else {
3724                         inode_dec_dirty_pages(inode);
3725                         f2fs_remove_dirty_inode(inode);
3726                 }
3727         }
3728
3729         clear_page_private_gcing(page);
3730
3731         if (test_opt(sbi, COMPRESS_CACHE)) {
3732                 if (f2fs_compressed_file(inode))
3733                         f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3734                 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3735                         clear_page_private_data(page);
3736         }
3737
3738         if (page_private_atomic(page))
3739                 return f2fs_drop_inmem_page(inode, page);
3740
3741         detach_page_private(page);
3742         set_page_private(page, 0);
3743 }
3744
3745 int f2fs_release_page(struct page *page, gfp_t wait)
3746 {
3747         /* If this is a dirty page, keep PagePrivate */
3748         if (PageDirty(page))
3749                 return 0;
3750
3751         /* This is an atomic-written page, keep its private data */
3752         if (page_private_atomic(page))
3753                 return 0;
3754
3755         if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
3756                 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3757                 struct inode *inode = page->mapping->host;
3758
3759                 if (f2fs_compressed_file(inode))
3760                         f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3761                 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3762                         clear_page_private_data(page);
3763         }
3764
3765         clear_page_private_gcing(page);
3766
3767         detach_page_private(page);
3768         set_page_private(page, 0);
3769         return 1;
3770 }
3771
3772 static int f2fs_set_data_page_dirty(struct page *page)
3773 {
3774         struct inode *inode = page_file_mapping(page)->host;
3775
3776         trace_f2fs_set_page_dirty(page, DATA);
3777
3778         if (!PageUptodate(page))
3779                 SetPageUptodate(page);
3780         if (PageSwapCache(page))
3781                 return __set_page_dirty_nobuffers(page);
3782
3783         if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3784                 if (!page_private_atomic(page)) {
3785                         f2fs_register_inmem_page(inode, page);
3786                         return 1;
3787                 }
3788                 /*
3789                  * This page has already been registered, so we just
3790                  * return here.
3791                  */
3792                 return 0;
3793         }
3794
3795         if (!PageDirty(page)) {
3796                 __set_page_dirty_nobuffers(page);
3797                 f2fs_update_dirty_page(inode, page);
3798                 return 1;
3799         }
3800         return 0;
3801 }
3802
3803
3804 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3805 {
3806 #ifdef CONFIG_F2FS_FS_COMPRESSION
3807         struct dnode_of_data dn;
3808         sector_t start_idx, blknr = 0;
3809         int ret;
3810
3811         start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3812
3813         set_new_dnode(&dn, inode, NULL, NULL, 0);
3814         ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3815         if (ret)
3816                 return 0;
3817
3818         if (dn.data_blkaddr != COMPRESS_ADDR) {
3819                 dn.ofs_in_node += block - start_idx;
3820                 blknr = f2fs_data_blkaddr(&dn);
3821                 if (!__is_valid_data_blkaddr(blknr))
3822                         blknr = 0;
3823         }
3824
3825         f2fs_put_dnode(&dn);
3826         return blknr;
3827 #else
3828         return 0;
3829 #endif
3830 }
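/*
 * Blocks inside a compressed cluster (data_blkaddr == COMPRESS_ADDR) have
 * no 1:1 logical-to-physical mapping, so the helper above reports 0 for
 * them; only clusters stored raw yield a real block number for bmap.
 */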
3831
3832
3833 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3834 {
3835         struct inode *inode = mapping->host;
3836         sector_t blknr = 0;
3837
3838         if (f2fs_has_inline_data(inode))
3839                 goto out;
3840
3841         /* make sure whole blocks are allocated */
3842         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3843                 filemap_write_and_wait(mapping);
3844
3845         /* the block number must be less than F2FS's max file blocks */
3846         if (unlikely(block >= max_file_blocks(inode)))
3847                 goto out;
3848
3849         if (f2fs_compressed_file(inode)) {
3850                 blknr = f2fs_bmap_compress(inode, block);
3851         } else {
3852                 struct f2fs_map_blocks map;
3853
3854                 memset(&map, 0, sizeof(map));
3855                 map.m_lblk = block;
3856                 map.m_len = 1;
3857                 map.m_next_pgofs = NULL;
3858                 map.m_seg_type = NO_CHECK_TYPE;
3859
3860                 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3861                         blknr = map.m_pblk;
3862         }
3863 out:
3864         trace_f2fs_bmap(inode, block, blknr);
3865         return blknr;
3866 }
3867
3868 #ifdef CONFIG_MIGRATION
3869 #include <linux/migrate.h>
3870
3871 int f2fs_migrate_page(struct address_space *mapping,
3872                 struct page *newpage, struct page *page, enum migrate_mode mode)
3873 {
3874         int rc, extra_count;
3875         struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3876         bool atomic_written = page_private_atomic(page);
3877
3878         BUG_ON(PageWriteback(page));
3879
3880         /* migrating an atomic-written page is safe with the inmem_lock held */
3881         if (atomic_written) {
3882                 if (mode != MIGRATE_SYNC)
3883                         return -EBUSY;
3884                 if (!mutex_trylock(&fi->inmem_lock))
3885                         return -EAGAIN;
3886         }
3887
3888         /* one extra reference was held for the atomic_write page */
3889         extra_count = atomic_written ? 1 : 0;
3890         rc = migrate_page_move_mapping(mapping, newpage,
3891                                 page, extra_count);
3892         if (rc != MIGRATEPAGE_SUCCESS) {
3893                 if (atomic_written)
3894                         mutex_unlock(&fi->inmem_lock);
3895                 return rc;
3896         }
3897
3898         if (atomic_written) {
3899                 struct inmem_pages *cur;
3900
3901                 list_for_each_entry(cur, &fi->inmem_pages, list)
3902                         if (cur->page == page) {
3903                                 cur->page = newpage;
3904                                 break;
3905                         }
3906                 mutex_unlock(&fi->inmem_lock);
3907                 put_page(page);
3908                 get_page(newpage);
3909         }
3910
3911         /* guarantee that we start with no stale private field */
3912         set_page_private(newpage, 0);
3913         if (PagePrivate(page)) {
3914                 set_page_private(newpage, page_private(page));
3915                 SetPagePrivate(newpage);
3916                 get_page(newpage);
3917
3918                 set_page_private(page, 0);
3919                 ClearPagePrivate(page);
3920                 put_page(page);
3921         }
3922
3923         if (mode != MIGRATE_SYNC_NO_COPY)
3924                 migrate_page_copy(newpage, page);
3925         else
3926                 migrate_page_states(newpage, page);
3927
3928         return MIGRATEPAGE_SUCCESS;
3929 }
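/*
 * For atomic-written pages the inmem list holds one extra page reference,
 * hence extra_count == 1 above; after a successful move the matching
 * inmem_pages entry is repointed to newpage and the reference is handed
 * over via put_page(page)/get_page(newpage).
 */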
3930 #endif
3931
3932 #ifdef CONFIG_SWAP
3933 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3934                                                         unsigned int blkcnt)
3935 {
3936         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3937         unsigned int blkofs;
3938         unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3939         unsigned int secidx = start_blk / blk_per_sec;
3940         unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3941         int ret = 0;
3942
3943         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3944         filemap_invalidate_lock(inode->i_mapping);
3945
3946         set_inode_flag(inode, FI_ALIGNED_WRITE);
3947
3948         for (; secidx < end_sec; secidx++) {
3949                 down_write(&sbi->pin_sem);
3950
3951                 f2fs_lock_op(sbi);
3952                 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3953                 f2fs_unlock_op(sbi);
3954
3955                 set_inode_flag(inode, FI_DO_DEFRAG);
3956
3957                 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
3958                         struct page *page;
3959                         unsigned int blkidx = secidx * blk_per_sec + blkofs;
3960
3961                         page = f2fs_get_lock_data_page(inode, blkidx, true);
3962                         if (IS_ERR(page)) {
3963                                 up_write(&sbi->pin_sem);
3964                                 ret = PTR_ERR(page);
3965                                 goto done;
3966                         }
3967
3968                         set_page_dirty(page);
3969                         f2fs_put_page(page, 1);
3970                 }
3971
3972                 clear_inode_flag(inode, FI_DO_DEFRAG);
3973
3974                 ret = filemap_fdatawrite(inode->i_mapping);
3975
3976                 up_write(&sbi->pin_sem);
3977
3978                 if (ret)
3979                         break;
3980         }
3981
3982 done:
3983         clear_inode_flag(inode, FI_DO_DEFRAG);
3984         clear_inode_flag(inode, FI_ALIGNED_WRITE);
3985
3986         filemap_invalidate_unlock(inode->i_mapping);
3987         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3988
3989         return ret;
3990 }
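/*
 * f2fs_migrate_blocks() realigns a block range to section boundaries for
 * swapfile use: for each section it pins allocation to a fresh
 * CURSEG_COLD_DATA_PINNED segment, dirties every page in that section, and
 * forces writeback so the data is rewritten contiguously within the
 * section.
 */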
3991
3992 static int check_swap_activate(struct swap_info_struct *sis,
3993                                 struct file *swap_file, sector_t *span)
3994 {
3995         struct address_space *mapping = swap_file->f_mapping;
3996         struct inode *inode = mapping->host;
3997         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3998         sector_t cur_lblock;
3999         sector_t last_lblock;
4000         sector_t pblock;
4001         sector_t lowest_pblock = -1;
4002         sector_t highest_pblock = 0;
4003         int nr_extents = 0;
4004         unsigned long nr_pblocks;
4005         unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
4006         unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
4007         unsigned int not_aligned = 0;
4008         int ret = 0;
4009
4010         /*
4011          * Map all the blocks into the extent list.  This code doesn't try
4012          * to be very smart.
4013          */
4014         cur_lblock = 0;
4015         last_lblock = bytes_to_blks(inode, i_size_read(inode));
4016
4017         while (cur_lblock < last_lblock && cur_lblock < sis->max) {
4018                 struct f2fs_map_blocks map;
4019 retry:
4020                 cond_resched();
4021
4022                 memset(&map, 0, sizeof(map));
4023                 map.m_lblk = cur_lblock;
4024                 map.m_len = last_lblock - cur_lblock;
4025                 map.m_next_pgofs = NULL;
4026                 map.m_next_extent = NULL;
4027                 map.m_seg_type = NO_CHECK_TYPE;
4028                 map.m_may_create = false;
4029
4030                 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
4031                 if (ret)
4032                         goto out;
4033
4034                 /* hole */
4035                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
4036                         f2fs_err(sbi, "Swapfile has holes");
4037                         ret = -EINVAL;
4038                         goto out;
4039                 }
4040
4041                 pblock = map.m_pblk;
4042                 nr_pblocks = map.m_len;
4043
4044                 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
4045                                 nr_pblocks & sec_blks_mask) {
4046                         not_aligned++;
4047
4048                         nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4049                         if (cur_lblock + nr_pblocks > sis->max)
4050                                 nr_pblocks -= blks_per_sec;
4051
4052                         if (!nr_pblocks) {
4053                                 /* this extent is the last one */
4054                                 nr_pblocks = map.m_len;
4055                                 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
4056                                 goto next;
4057                         }
4058
4059                         ret = f2fs_migrate_blocks(inode, cur_lblock,
4060                                                         nr_pblocks);
4061                         if (ret)
4062                                 goto out;
4063                         goto retry;
4064                 }
4065 next:
4066                 if (cur_lblock + nr_pblocks >= sis->max)
4067                         nr_pblocks = sis->max - cur_lblock;
4068
4069                 if (cur_lblock) {       /* exclude the header page */
4070                         if (pblock < lowest_pblock)
4071                                 lowest_pblock = pblock;
4072                         if (pblock + nr_pblocks - 1 > highest_pblock)
4073                                 highest_pblock = pblock + nr_pblocks - 1;
4074                 }
4075
4076                 /*
4077                  * Add this contiguous run of blocks as a single swap extent.
4078                  */
4079                 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4080                 if (ret < 0)
4081                         goto out;
4082                 nr_extents += ret;
4083                 cur_lblock += nr_pblocks;
4084         }
4085         ret = nr_extents;
4086         *span = 1 + highest_pblock - lowest_pblock;
4087         if (cur_lblock == 0)
4088                 cur_lblock = 1; /* force Empty message */
4089         sis->max = cur_lblock;
4090         sis->pages = cur_lblock - 1;
4091         sis->highest_bit = cur_lblock - 1;
4092 out:
4093         if (not_aligned)
4094                 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
4095                           not_aligned, blks_per_sec * F2FS_BLKSIZE);
4096         return ret;
4097 }
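/*
 * Alignment example for the check above (assuming 2MB sections of 4KB
 * blocks, i.e. sec_blks_mask == 511): an extent is usable as-is only if
 * both its offset from the main area start and its length are multiples of
 * 512 blocks; anything else is rewritten via f2fs_migrate_blocks() and the
 * mapping is retried.
 */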
4098
4099 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4100                                 sector_t *span)
4101 {
4102         struct inode *inode = file_inode(file);
4103         int ret;
4104
4105         if (!S_ISREG(inode->i_mode))
4106                 return -EINVAL;
4107
4108         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4109                 return -EROFS;
4110
4111         if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4112                 f2fs_err(F2FS_I_SB(inode),
4113                         "Swapfile not supported in LFS mode");
4114                 return -EINVAL;
4115         }
4116
4117         ret = f2fs_convert_inline_inode(inode);
4118         if (ret)
4119                 return ret;
4120
4121         if (!f2fs_disable_compressed_file(inode))
4122                 return -EINVAL;
4123
4124         f2fs_precache_extents(inode);
4125
4126         ret = check_swap_activate(sis, file, span);
4127         if (ret < 0)
4128                 return ret;
4129
4130         set_inode_flag(inode, FI_PIN_FILE);
4131         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4132         return ret;
4133 }
4134
4135 static void f2fs_swap_deactivate(struct file *file)
4136 {
4137         struct inode *inode = file_inode(file);
4138
4139         clear_inode_flag(inode, FI_PIN_FILE);
4140 }
4141 #else
4142 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4143                                 sector_t *span)
4144 {
4145         return -EOPNOTSUPP;
4146 }
4147
4148 static void f2fs_swap_deactivate(struct file *file)
4149 {
4150 }
4151 #endif
4152
4153 const struct address_space_operations f2fs_dblock_aops = {
4154         .readpage       = f2fs_read_data_page,
4155         .readahead      = f2fs_readahead,
4156         .writepage      = f2fs_write_data_page,
4157         .writepages     = f2fs_write_data_pages,
4158         .write_begin    = f2fs_write_begin,
4159         .write_end      = f2fs_write_end,
4160         .set_page_dirty = f2fs_set_data_page_dirty,
4161         .invalidatepage = f2fs_invalidate_page,
4162         .releasepage    = f2fs_release_page,
4163         .direct_IO      = f2fs_direct_IO,
4164         .bmap           = f2fs_bmap,
4165         .swap_activate  = f2fs_swap_activate,
4166         .swap_deactivate = f2fs_swap_deactivate,
4167 #ifdef CONFIG_MIGRATION
4168         .migratepage    = f2fs_migrate_page,
4169 #endif
4170 };
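/*
 * f2fs_dblock_aops is the address_space_operations table used for regular
 * file data; node and meta inodes are served by their own aops defined
 * elsewhere in f2fs.
 */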
4171
4172 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4173 {
4174         struct address_space *mapping = page_mapping(page);
4175         unsigned long flags;
4176
4177         xa_lock_irqsave(&mapping->i_pages, flags);
4178         __xa_clear_mark(&mapping->i_pages, page_index(page),
4179                                                 PAGECACHE_TAG_DIRTY);
4180         xa_unlock_irqrestore(&mapping->i_pages, flags);
4181 }
4182
4183 int __init f2fs_init_post_read_processing(void)
4184 {
4185         bio_post_read_ctx_cache =
4186                 kmem_cache_create("f2fs_bio_post_read_ctx",
4187                                   sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4188         if (!bio_post_read_ctx_cache)
4189                 goto fail;
4190         bio_post_read_ctx_pool =
4191                 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4192                                          bio_post_read_ctx_cache);
4193         if (!bio_post_read_ctx_pool)
4194                 goto fail_free_cache;
4195         return 0;
4196
4197 fail_free_cache:
4198         kmem_cache_destroy(bio_post_read_ctx_cache);
4199 fail:
4200         return -ENOMEM;
4201 }
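/*
 * The mempool above preallocates NUM_PREALLOC_POST_READ_CTXS contexts so
 * that read-completion post-processing (decryption, decompression, verity)
 * can make forward progress even when allocations would fail under memory
 * pressure.
 */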
4202
4203 void f2fs_destroy_post_read_processing(void)
4204 {
4205         mempool_destroy(bio_post_read_ctx_pool);
4206         kmem_cache_destroy(bio_post_read_ctx_cache);
4207 }
4208
4209 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4210 {
4211         if (!f2fs_sb_has_encrypt(sbi) &&
4212                 !f2fs_sb_has_verity(sbi) &&
4213                 !f2fs_sb_has_compression(sbi))
4214                 return 0;
4215
4216         sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4217                                                  WQ_UNBOUND | WQ_HIGHPRI,
4218                                                  num_online_cpus());
4219         if (!sbi->post_read_wq)
4220                 return -ENOMEM;
4221         return 0;
4222 }
4223
4224 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4225 {
4226         if (sbi->post_read_wq)
4227                 destroy_workqueue(sbi->post_read_wq);
4228 }
4229
4230 int __init f2fs_init_bio_entry_cache(void)
4231 {
4232         bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4233                         sizeof(struct bio_entry));
4234         if (!bio_entry_slab)
4235                 return -ENOMEM;
4236         return 0;
4237 }
4238
4239 void f2fs_destroy_bio_entry_cache(void)
4240 {
4241         kmem_cache_destroy(bio_entry_slab);
4242 }