1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/buffer_head.h>
11 #include <linux/mpage.h>
12 #include <linux/writeback.h>
13 #include <linux/backing-dev.h>
14 #include <linux/pagevec.h>
15 #include <linux/blkdev.h>
16 #include <linux/bio.h>
17 #include <linux/blk-crypto.h>
18 #include <linux/swap.h>
19 #include <linux/prefetch.h>
20 #include <linux/uio.h>
21 #include <linux/cleancache.h>
22 #include <linux/sched/signal.h>
23 #include <linux/fiemap.h>
29 #include <trace/events/f2fs.h>
31 #define NUM_PREALLOC_POST_READ_CTXS 128
33 static struct kmem_cache *bio_post_read_ctx_cache;
34 static struct kmem_cache *bio_entry_slab;
35 static mempool_t *bio_post_read_ctx_pool;
36 static struct bio_set f2fs_bioset;
38 #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
40 int __init f2fs_init_bioset(void)
42 if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
43 0, BIOSET_NEED_BVECS))
48 void f2fs_destroy_bioset(void)
50 bioset_exit(&f2fs_bioset);
53 static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
54 unsigned int nr_iovecs)
56 return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
59 struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
62 /* No failure on bio allocation */
63 return __f2fs_bio_alloc(GFP_NOIO, npages);
66 if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
67 f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
71 return __f2fs_bio_alloc(GFP_KERNEL, npages);
74 static bool __is_cp_guaranteed(struct page *page)
76 struct address_space *mapping = page->mapping;
78 struct f2fs_sb_info *sbi;
83 if (f2fs_is_compressed_page(page))
86 inode = mapping->host;
87 sbi = F2FS_I_SB(inode);
89 if (inode->i_ino == F2FS_META_INO(sbi) ||
90 inode->i_ino == F2FS_NODE_INO(sbi) ||
91 S_ISDIR(inode->i_mode) ||
92 (S_ISREG(inode->i_mode) &&
93 (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
99 static enum count_type __read_io_type(struct page *page)
101 struct address_space *mapping = page_file_mapping(page);
104 struct inode *inode = mapping->host;
105 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
107 if (inode->i_ino == F2FS_META_INO(sbi))
110 if (inode->i_ino == F2FS_NODE_INO(sbi))
116 /* postprocessing steps for read bios */
117 enum bio_post_read_step {
119 STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */
120 STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */
124 struct bio_post_read_ctx {
126 struct f2fs_sb_info *sbi;
127 struct work_struct work;
128 unsigned int enabled_steps;
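/*
 * Editorial note: ->enabled_steps is a bitmask of bio_post_read_step values.
 * For example, a read from an encrypted fs-verity file would carry
 * (1 << STEP_DECRYPT) | (1 << STEP_VERITY), and the post-read machinery
 * below applies decryption before verification.
 */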
131 static void __read_end_io(struct bio *bio, bool compr, bool verity)
135 struct bvec_iter_all iter_all;
137 bio_for_each_segment_all(bv, bio, iter_all) {
140 #ifdef CONFIG_F2FS_FS_COMPRESSION
141 if (compr && f2fs_is_compressed_page(page)) {
142 f2fs_decompress_pages(bio, page, verity);
149 /* PG_error was set if any post_read step failed */
150 if (bio->bi_status || PageError(page)) {
151 ClearPageUptodate(page);
152 /* will be re-read later */
153 ClearPageError(page);
155 SetPageUptodate(page);
157 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
162 static void f2fs_release_read_bio(struct bio *bio);
163 static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
166 __read_end_io(bio, false, verity);
167 f2fs_release_read_bio(bio);
170 static void f2fs_decompress_bio(struct bio *bio, bool verity)
172 __read_end_io(bio, true, verity);
175 static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
177 static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
179 fscrypt_decrypt_bio(ctx->bio);
182 static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
184 f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
187 #ifdef CONFIG_F2FS_FS_COMPRESSION
188 static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
190 f2fs_decompress_end_io(rpages, cluster_size, false, true);
193 static void f2fs_verify_bio(struct bio *bio)
196 struct bvec_iter_all iter_all;
198 bio_for_each_segment_all(bv, bio, iter_all) {
199 struct page *page = bv->bv_page;
200 struct decompress_io_ctx *dic;
202 dic = (struct decompress_io_ctx *)page_private(page);
205 if (atomic_dec_return(&dic->verity_pages))
207 f2fs_verify_pages(dic->rpages,
213 if (bio->bi_status || PageError(page))
216 if (fsverity_verify_page(page)) {
217 SetPageUptodate(page);
221 ClearPageUptodate(page);
222 ClearPageError(page);
224 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
230 static void f2fs_verity_work(struct work_struct *work)
232 struct bio_post_read_ctx *ctx =
233 container_of(work, struct bio_post_read_ctx, work);
234 struct bio *bio = ctx->bio;
235 #ifdef CONFIG_F2FS_FS_COMPRESSION
236 unsigned int enabled_steps = ctx->enabled_steps;
240 * fsverity_verify_bio() may call readpages() again, and while verity
241 * will be disabled for this, decryption may still be needed, resulting
242 * in another bio_post_read_ctx being allocated. So to prevent
243 * deadlocks we need to release the current ctx to the mempool first.
244 * This assumes that verity is the last post-read step.
246 mempool_free(ctx, bio_post_read_ctx_pool);
247 bio->bi_private = NULL;
249 #ifdef CONFIG_F2FS_FS_COMPRESSION
250 /* previous step is decompression */
251 if (enabled_steps & (1 << STEP_DECOMPRESS)) {
252 f2fs_verify_bio(bio);
253 f2fs_release_read_bio(bio);
258 fsverity_verify_bio(bio);
259 __f2fs_read_end_io(bio, false, false);
262 static void f2fs_post_read_work(struct work_struct *work)
264 struct bio_post_read_ctx *ctx =
265 container_of(work, struct bio_post_read_ctx, work);
267 if (ctx->enabled_steps & (1 << STEP_DECRYPT))
268 f2fs_decrypt_work(ctx);
270 if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
271 f2fs_decompress_work(ctx);
273 if (ctx->enabled_steps & (1 << STEP_VERITY)) {
274 INIT_WORK(&ctx->work, f2fs_verity_work);
275 fsverity_enqueue_verify_work(&ctx->work);
279 __f2fs_read_end_io(ctx->bio,
280 ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
283 static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
284 struct work_struct *work)
286 queue_work(sbi->post_read_wq, work);
289 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
292 * We use different work queues for decryption and for verity because
293 * verity may require reading metadata pages that need decryption, and
294 * we shouldn't recurse to the same workqueue.
297 if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
298 ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
299 INIT_WORK(&ctx->work, f2fs_post_read_work);
300 f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
304 if (ctx->enabled_steps & (1 << STEP_VERITY)) {
305 INIT_WORK(&ctx->work, f2fs_verity_work);
306 fsverity_enqueue_verify_work(&ctx->work);
310 __f2fs_read_end_io(ctx->bio, false, false);
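/*
 * Editorial summary: decryption and decompression run on sbi->post_read_wq,
 * while verification is handed to the fsverity workqueue; a bio whose ctx has
 * no pending steps is completed immediately via __f2fs_read_end_io().
 */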
313 static bool f2fs_bio_post_read_required(struct bio *bio)
315 return bio->bi_private;
318 static void f2fs_read_end_io(struct bio *bio)
320 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
322 if (time_to_inject(sbi, FAULT_READ_IO)) {
323 f2fs_show_injection_info(sbi, FAULT_READ_IO);
324 bio->bi_status = BLK_STS_IOERR;
327 if (f2fs_bio_post_read_required(bio)) {
328 struct bio_post_read_ctx *ctx = bio->bi_private;
330 bio_post_read_processing(ctx);
334 __f2fs_read_end_io(bio, false, false);
337 static void f2fs_write_end_io(struct bio *bio)
339 struct f2fs_sb_info *sbi = bio->bi_private;
340 struct bio_vec *bvec;
341 struct bvec_iter_all iter_all;
343 if (time_to_inject(sbi, FAULT_WRITE_IO)) {
344 f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
345 bio->bi_status = BLK_STS_IOERR;
348 bio_for_each_segment_all(bvec, bio, iter_all) {
349 struct page *page = bvec->bv_page;
350 enum count_type type = WB_DATA_TYPE(page);
352 if (IS_DUMMY_WRITTEN_PAGE(page)) {
353 set_page_private(page, (unsigned long)NULL);
354 ClearPagePrivate(page);
356 mempool_free(page, sbi->write_io_dummy);
358 if (unlikely(bio->bi_status))
359 f2fs_stop_checkpoint(sbi, true);
363 fscrypt_finalize_bounce_page(&page);
365 #ifdef CONFIG_F2FS_FS_COMPRESSION
366 if (f2fs_is_compressed_page(page)) {
367 f2fs_compress_write_end_io(bio, page);
372 if (unlikely(bio->bi_status)) {
373 mapping_set_error(page->mapping, -EIO);
374 if (type == F2FS_WB_CP_DATA)
375 f2fs_stop_checkpoint(sbi, true);
378 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
379 page->index != nid_of_node(page));
381 dec_page_count(sbi, type);
382 if (f2fs_in_warm_node_list(sbi, page))
383 f2fs_del_fsync_node_entry(sbi, page);
384 clear_cold_data(page);
385 end_page_writeback(page);
387 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
388 wq_has_sleeper(&sbi->cp_wait))
389 wake_up(&sbi->cp_wait);
394 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
395 block_t blk_addr, struct bio *bio)
397 struct block_device *bdev = sbi->sb->s_bdev;
400 if (f2fs_is_multi_device(sbi)) {
401 for (i = 0; i < sbi->s_ndevs; i++) {
402 if (FDEV(i).start_blk <= blk_addr &&
403 FDEV(i).end_blk >= blk_addr) {
404 blk_addr -= FDEV(i).start_blk;
411 bio_set_dev(bio, bdev);
412 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
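/*
 * Illustrative example (editorial): on a multi-device volume where FDEV(1)
 * covers blocks 1000..1999, blk_addr 1500 is rebased to block 500 of
 * FDEV(1).bdev before SECTOR_FROM_BLOCK() converts it to 512-byte sectors
 * (a left shift by 3 with the default 4 KiB block size).
 */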
417 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
421 if (!f2fs_is_multi_device(sbi))
424 for (i = 0; i < sbi->s_ndevs; i++)
425 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
431 * Return true if pre_bio's bdev is the same as its target device.
433 static bool __same_bdev(struct f2fs_sb_info *sbi,
434 block_t blk_addr, struct bio *bio)
436 struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
437 return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
440 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
442 struct f2fs_sb_info *sbi = fio->sbi;
445 bio = f2fs_bio_alloc(sbi, npages, true);
447 f2fs_target_device(sbi, fio->new_blkaddr, bio);
448 if (is_read_io(fio->op)) {
449 bio->bi_end_io = f2fs_read_end_io;
450 bio->bi_private = NULL;
452 bio->bi_end_io = f2fs_write_end_io;
453 bio->bi_private = sbi;
454 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
455 fio->type, fio->temp);
458 wbc_init_bio(fio->io_wbc, bio);
463 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
465 const struct f2fs_io_info *fio,
469 * The f2fs garbage collector sets ->encrypted_page when it wants to
470 * read/write raw data without encryption.
472 if (!fio || !fio->encrypted_page)
473 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
476 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
478 const struct f2fs_io_info *fio)
481 * The f2fs garbage collector sets ->encrypted_page when it wants to
482 * read/write raw data without encryption.
484 if (fio && fio->encrypted_page)
485 return !bio_has_crypt_ctx(bio);
487 return fscrypt_mergeable_bio(bio, inode, next_idx);
490 static inline void __submit_bio(struct f2fs_sb_info *sbi,
491 struct bio *bio, enum page_type type)
493 if (!is_read_io(bio_op(bio))) {
496 if (type != DATA && type != NODE)
499 if (f2fs_lfs_mode(sbi) && current->plug)
500 blk_finish_plug(current->plug);
502 if (!F2FS_IO_ALIGNED(sbi))
505 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
506 start %= F2FS_IO_SIZE(sbi);
511 /* fill dummy pages */
512 for (; start < F2FS_IO_SIZE(sbi); start++) {
514 mempool_alloc(sbi->write_io_dummy,
515 GFP_NOIO | __GFP_NOFAIL);
516 f2fs_bug_on(sbi, !page);
518 zero_user_segment(page, 0, PAGE_SIZE);
519 SetPagePrivate(page);
520 set_page_private(page, DUMMY_WRITTEN_PAGE);
522 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
526 * In the NODE case, we lose the next block address chain, so we
527 * need to do a checkpoint in f2fs_sync_file.
530 set_sbi_flag(sbi, SBI_NEED_CP);
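/*
 * Editorial note: with the "io_bits" mount option, F2FS_IO_SIZE(sbi) blocks
 * form one aligned write unit; a partially filled write bio is padded above
 * with zeroed dummy pages from sbi->write_io_dummy (tagged DUMMY_WRITTEN_PAGE
 * and released again in f2fs_write_end_io()) so the device only sees
 * aligned IOs.
 */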
533 if (is_read_io(bio_op(bio)))
534 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
536 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
540 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
541 struct bio *bio, enum page_type type)
543 __submit_bio(sbi, bio, type);
546 static void __attach_io_flag(struct f2fs_io_info *fio)
548 struct f2fs_sb_info *sbi = fio->sbi;
549 unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
550 unsigned int io_flag, fua_flag, meta_flag;
552 if (fio->type == DATA)
553 io_flag = sbi->data_io_flag;
554 else if (fio->type == NODE)
555 io_flag = sbi->node_io_flag;
559 fua_flag = io_flag & temp_mask;
560 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
563 * data/node io flag bits per temp:
564 *          REQ_META     |      REQ_FUA      |
565 *    5 |    4 |   3 |    2 |    1 |   0 |
566 * Cold | Warm | Hot | Cold | Warm | Hot |
568 if ((1 << fio->temp) & meta_flag)
569 fio->op_flags |= REQ_META;
570 if ((1 << fio->temp) & fua_flag)
571 fio->op_flags |= REQ_FUA;
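/*
 * Worked example (editorial, assuming NR_TEMP_TYPE == 3 so temp_mask is
 * 0b111): with sbi->data_io_flag == 0b001001, fua_flag == 0b001 and
 * meta_flag == 0b001, so hot data bios get both REQ_FUA and REQ_META while
 * warm and cold data bios get neither.
 */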
574 static void __submit_merged_bio(struct f2fs_bio_info *io)
576 struct f2fs_io_info *fio = &io->fio;
581 __attach_io_flag(fio);
582 bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
584 if (is_read_io(fio->op))
585 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
587 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
589 __submit_bio(io->sbi, io->bio, fio->type);
593 static bool __has_merged_page(struct bio *bio, struct inode *inode,
594 struct page *page, nid_t ino)
596 struct bio_vec *bvec;
597 struct bvec_iter_all iter_all;
602 if (!inode && !page && !ino)
605 bio_for_each_segment_all(bvec, bio, iter_all) {
606 struct page *target = bvec->bv_page;
608 if (fscrypt_is_bounce_page(target)) {
609 target = fscrypt_pagecache_page(target);
613 if (f2fs_is_compressed_page(target)) {
614 target = f2fs_compress_control_page(target);
619 if (inode && inode == target->mapping->host)
621 if (page && page == target)
623 if (ino && ino == ino_of_node(target))
630 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
631 enum page_type type, enum temp_type temp)
633 enum page_type btype = PAGE_TYPE_OF_BIO(type);
634 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
636 down_write(&io->io_rwsem);
638 /* change META to META_FLUSH in the checkpoint procedure */
639 if (type >= META_FLUSH) {
640 io->fio.type = META_FLUSH;
641 io->fio.op = REQ_OP_WRITE;
642 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
643 if (!test_opt(sbi, NOBARRIER))
644 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
646 __submit_merged_bio(io);
647 up_write(&io->io_rwsem);
650 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
651 struct inode *inode, struct page *page,
652 nid_t ino, enum page_type type, bool force)
657 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
659 enum page_type btype = PAGE_TYPE_OF_BIO(type);
660 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
662 down_read(&io->io_rwsem);
663 ret = __has_merged_page(io->bio, inode, page, ino);
664 up_read(&io->io_rwsem);
667 __f2fs_submit_merged_write(sbi, type, temp);
669 /* TODO: use HOT temp only for meta pages now. */
675 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
677 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
680 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
681 struct inode *inode, struct page *page,
682 nid_t ino, enum page_type type)
684 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
687 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
689 f2fs_submit_merged_write(sbi, DATA);
690 f2fs_submit_merged_write(sbi, NODE);
691 f2fs_submit_merged_write(sbi, META);
695 * Fill the locked page with data located at the block address.
696 * A caller needs to unlock the page on failure.
698 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
701 struct page *page = fio->encrypted_page ?
702 fio->encrypted_page : fio->page;
704 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
705 fio->is_por ? META_POR : (__is_meta_io(fio) ?
706 META_GENERIC : DATA_GENERIC_ENHANCE)))
707 return -EFSCORRUPTED;
709 trace_f2fs_submit_page_bio(page, fio);
710 f2fs_trace_ios(fio, 0);
712 /* Allocate a new bio */
713 bio = __bio_alloc(fio, 1);
715 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
716 fio->page->index, fio, GFP_NOIO);
718 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
723 if (fio->io_wbc && !is_read_io(fio->op))
724 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
726 __attach_io_flag(fio);
727 bio_set_op_attrs(bio, fio->op, fio->op_flags);
729 inc_page_count(fio->sbi, is_read_io(fio->op) ?
730 __read_io_type(page): WB_DATA_TYPE(fio->page));
732 __submit_bio(fio->sbi, bio, fio->type);
736 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
737 block_t last_blkaddr, block_t cur_blkaddr)
739 if (last_blkaddr + 1 != cur_blkaddr)
741 return __same_bdev(sbi, cur_blkaddr, bio);
744 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
745 struct f2fs_io_info *fio)
747 if (io->fio.op != fio->op)
749 return io->fio.op_flags == fio->op_flags;
752 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
753 struct f2fs_bio_info *io,
754 struct f2fs_io_info *fio,
755 block_t last_blkaddr,
758 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
759 unsigned int filled_blocks =
760 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
761 unsigned int io_size = F2FS_IO_SIZE(sbi);
762 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
764 /* IOs in the bio are aligned and the remaining vector space is not enough */
765 if (!(filled_blocks % io_size) && left_vecs < io_size)
768 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
770 return io_type_is_mergeable(io, fio);
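/*
 * Editorial summary: a page is appended to an in-flight write bio only when
 * the new block is physically contiguous with the previous one, sits on the
 * same block device, and the pending bio was built with the same op and
 * op_flags; otherwise the bio is submitted and a fresh one is allocated.
 */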
773 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
774 struct page *page, enum temp_type temp)
776 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
777 struct bio_entry *be;
779 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
783 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
786 down_write(&io->bio_list_lock);
787 list_add_tail(&be->list, &io->bio_list);
788 up_write(&io->bio_list_lock);
791 static void del_bio_entry(struct bio_entry *be)
794 kmem_cache_free(bio_entry_slab, be);
797 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
800 struct f2fs_sb_info *sbi = fio->sbi;
805 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
806 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
807 struct list_head *head = &io->bio_list;
808 struct bio_entry *be;
810 down_write(&io->bio_list_lock);
811 list_for_each_entry(be, head, list) {
817 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
820 if (f2fs_crypt_mergeable_bio(*bio,
821 fio->page->mapping->host,
822 fio->page->index, fio) &&
823 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
829 /* page can't be merged into bio; submit the bio */
831 __submit_bio(sbi, *bio, DATA);
834 up_write(&io->bio_list_lock);
845 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
846 struct bio **bio, struct page *page)
850 struct bio *target = bio ? *bio : NULL;
852 f2fs_bug_on(sbi, !target && !page);
854 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
855 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
856 struct list_head *head = &io->bio_list;
857 struct bio_entry *be;
859 if (list_empty(head))
862 down_read(&io->bio_list_lock);
863 list_for_each_entry(be, head, list) {
865 found = (target == be->bio);
867 found = __has_merged_page(be->bio, NULL,
872 up_read(&io->bio_list_lock);
879 down_write(&io->bio_list_lock);
880 list_for_each_entry(be, head, list) {
882 found = (target == be->bio);
884 found = __has_merged_page(be->bio, NULL,
892 up_write(&io->bio_list_lock);
896 __submit_bio(sbi, target, DATA);
903 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
905 struct bio *bio = *fio->bio;
906 struct page *page = fio->encrypted_page ?
907 fio->encrypted_page : fio->page;
909 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
910 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
911 return -EFSCORRUPTED;
913 trace_f2fs_submit_page_bio(page, fio);
914 f2fs_trace_ios(fio, 0);
916 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
918 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
921 bio = __bio_alloc(fio, BIO_MAX_PAGES);
922 __attach_io_flag(fio);
923 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
924 fio->page->index, fio, GFP_NOIO);
925 bio_set_op_attrs(bio, fio->op, fio->op_flags);
927 add_bio_entry(fio->sbi, bio, page, fio->temp);
929 if (add_ipu_page(fio, &bio, page))
934 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
936 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
938 *fio->last_block = fio->new_blkaddr;
944 void f2fs_submit_page_write(struct f2fs_io_info *fio)
946 struct f2fs_sb_info *sbi = fio->sbi;
947 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
948 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
949 struct page *bio_page;
951 f2fs_bug_on(sbi, is_read_io(fio->op));
953 down_write(&io->io_rwsem);
956 spin_lock(&io->io_lock);
957 if (list_empty(&io->io_list)) {
958 spin_unlock(&io->io_lock);
961 fio = list_first_entry(&io->io_list,
962 struct f2fs_io_info, list);
963 list_del(&fio->list);
964 spin_unlock(&io->io_lock);
967 verify_fio_blkaddr(fio);
969 if (fio->encrypted_page)
970 bio_page = fio->encrypted_page;
971 else if (fio->compressed_page)
972 bio_page = fio->compressed_page;
974 bio_page = fio->page;
976 /* set submitted = true as a return value */
977 fio->submitted = true;
979 inc_page_count(sbi, WB_DATA_TYPE(bio_page));
982 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
984 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
985 bio_page->index, fio)))
986 __submit_merged_bio(io);
988 if (io->bio == NULL) {
989 if (F2FS_IO_ALIGNED(sbi) &&
990 (fio->type == DATA || fio->type == NODE) &&
991 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
992 dec_page_count(sbi, WB_DATA_TYPE(bio_page));
996 io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
997 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
998 bio_page->index, fio, GFP_NOIO);
1002 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1003 __submit_merged_bio(io);
1008 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1010 io->last_block_in_bio = fio->new_blkaddr;
1011 f2fs_trace_ios(fio, 0);
1013 trace_f2fs_submit_page_write(fio->page, fio);
1018 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1019 !f2fs_is_checkpoint_ready(sbi))
1020 __submit_merged_bio(io);
1021 up_write(&io->io_rwsem);
1024 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
1026 return fsverity_active(inode) &&
1027 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
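/*
 * Editorial note: only pages lying within i_size need fs-verity
 * verification; a page entirely beyond EOF is simply zeroed and marked
 * uptodate in f2fs_read_single_page(), so no STEP_VERITY work is queued
 * for it.
 */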
1030 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1031 unsigned nr_pages, unsigned op_flag,
1032 pgoff_t first_idx, bool for_write,
1035 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1037 struct bio_post_read_ctx *ctx;
1038 unsigned int post_read_steps = 0;
1040 bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
1043 return ERR_PTR(-ENOMEM);
1045 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1047 f2fs_target_device(sbi, blkaddr, bio);
1048 bio->bi_end_io = f2fs_read_end_io;
1049 bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
1051 if (fscrypt_inode_uses_fs_layer_crypto(inode))
1052 post_read_steps |= 1 << STEP_DECRYPT;
1053 if (f2fs_compressed_file(inode))
1054 post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
1055 if (for_verity && f2fs_need_verity(inode, first_idx))
1056 post_read_steps |= 1 << STEP_VERITY;
1058 if (post_read_steps) {
1059 /* Due to the mempool, this never fails. */
1060 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1063 ctx->enabled_steps = post_read_steps;
1064 bio->bi_private = ctx;
1070 static void f2fs_release_read_bio(struct bio *bio)
1072 if (bio->bi_private)
1073 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1077 /* This can handle encryption stuffs */
1078 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1079 block_t blkaddr, int op_flags, bool for_write)
1081 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1084 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1085 page->index, for_write, true);
1087 return PTR_ERR(bio);
1089 /* wait for GCed page writeback via META_MAPPING */
1090 f2fs_wait_on_block_writeback(inode, blkaddr);
1092 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1096 ClearPageError(page);
1097 inc_page_count(sbi, F2FS_RD_DATA);
1098 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1099 __submit_bio(sbi, bio, DATA);
1103 static void __set_data_blkaddr(struct dnode_of_data *dn)
1105 struct f2fs_node *rn = F2FS_NODE(dn->node_page);
1109 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
1110 base = get_extra_isize(dn->inode);
1112 /* Get physical address of data block */
1113 addr_array = blkaddr_in_node(rn);
1114 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1118 * Lock ordering for the change of data block address:
1121 * update block addresses in the node page
1123 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
1125 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1126 __set_data_blkaddr(dn);
1127 if (set_page_dirty(dn->node_page))
1128 dn->node_changed = true;
1131 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1133 dn->data_blkaddr = blkaddr;
1134 f2fs_set_data_blkaddr(dn);
1135 f2fs_update_extent_cache(dn);
1138 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1139 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1141 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1147 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1149 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1152 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1153 dn->ofs_in_node, count);
1155 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1157 for (; count > 0; dn->ofs_in_node++) {
1158 block_t blkaddr = f2fs_data_blkaddr(dn);
1159 if (blkaddr == NULL_ADDR) {
1160 dn->data_blkaddr = NEW_ADDR;
1161 __set_data_blkaddr(dn);
1166 if (set_page_dirty(dn->node_page))
1167 dn->node_changed = true;
1171 /* Should keep dn->ofs_in_node unchanged */
1172 int f2fs_reserve_new_block(struct dnode_of_data *dn)
1174 unsigned int ofs_in_node = dn->ofs_in_node;
1177 ret = f2fs_reserve_new_blocks(dn, 1);
1178 dn->ofs_in_node = ofs_in_node;
1182 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1184 bool need_put = dn->inode_page ? false : true;
1187 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1191 if (dn->data_blkaddr == NULL_ADDR)
1192 err = f2fs_reserve_new_block(dn);
1193 if (err || need_put)
1198 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
1200 struct extent_info ei = {0, 0, 0};
1201 struct inode *inode = dn->inode;
1203 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1204 dn->data_blkaddr = ei.blk + index - ei.fofs;
1208 return f2fs_reserve_block(dn, index);
1211 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1212 int op_flags, bool for_write)
1214 struct address_space *mapping = inode->i_mapping;
1215 struct dnode_of_data dn;
1217 struct extent_info ei = {0,0,0};
1220 page = f2fs_grab_cache_page(mapping, index, for_write);
1222 return ERR_PTR(-ENOMEM);
1224 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1225 dn.data_blkaddr = ei.blk + index - ei.fofs;
1226 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1227 DATA_GENERIC_ENHANCE_READ)) {
1228 err = -EFSCORRUPTED;
1234 set_new_dnode(&dn, inode, NULL, NULL, 0);
1235 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1238 f2fs_put_dnode(&dn);
1240 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1244 if (dn.data_blkaddr != NEW_ADDR &&
1245 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1247 DATA_GENERIC_ENHANCE)) {
1248 err = -EFSCORRUPTED;
1252 if (PageUptodate(page)) {
1258 * A new dentry page is allocated but cannot be written, since its
1259 * new inode page couldn't be allocated due to -ENOSPC.
1260 * In such a case, its blkaddr can remain NEW_ADDR.
1261 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1262 * f2fs_init_inode_metadata.
1264 if (dn.data_blkaddr == NEW_ADDR) {
1265 zero_user_segment(page, 0, PAGE_SIZE);
1266 if (!PageUptodate(page))
1267 SetPageUptodate(page);
1272 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1273 op_flags, for_write);
1279 f2fs_put_page(page, 1);
1280 return ERR_PTR(err);
1283 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
1285 struct address_space *mapping = inode->i_mapping;
1288 page = find_get_page(mapping, index);
1289 if (page && PageUptodate(page))
1291 f2fs_put_page(page, 0);
1293 page = f2fs_get_read_data_page(inode, index, 0, false);
1297 if (PageUptodate(page))
1300 wait_on_page_locked(page);
1301 if (unlikely(!PageUptodate(page))) {
1302 f2fs_put_page(page, 0);
1303 return ERR_PTR(-EIO);
1310 * If it tries to access a hole, return an error, because the callers
1311 * (functions in dir.c and GC) need to know
1312 * whether this page exists or not.
1313 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1316 struct address_space *mapping = inode->i_mapping;
1319 page = f2fs_get_read_data_page(inode, index, 0, for_write);
1323 /* wait for read completion */
1325 if (unlikely(page->mapping != mapping)) {
1326 f2fs_put_page(page, 1);
1329 if (unlikely(!PageUptodate(page))) {
1330 f2fs_put_page(page, 1);
1331 return ERR_PTR(-EIO);
1337 * Caller ensures that this data page is never allocated.
1338 * A new zero-filled data page is allocated in the page cache.
1340 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op() and f2fs_unlock_op().
1342 * Note that ipage is set only by make_empty_dir, and if any error occurs,
1343 * ipage should be released by this function.
1345 struct page *f2fs_get_new_data_page(struct inode *inode,
1346 struct page *ipage, pgoff_t index, bool new_i_size)
1348 struct address_space *mapping = inode->i_mapping;
1350 struct dnode_of_data dn;
1353 page = f2fs_grab_cache_page(mapping, index, true);
1356 * before exiting, we should make sure ipage will be released
1357 * if any error occurs.
1359 f2fs_put_page(ipage, 1);
1360 return ERR_PTR(-ENOMEM);
1363 set_new_dnode(&dn, inode, ipage, NULL, 0);
1364 err = f2fs_reserve_block(&dn, index);
1366 f2fs_put_page(page, 1);
1367 return ERR_PTR(err);
1370 f2fs_put_dnode(&dn);
1372 if (PageUptodate(page))
1375 if (dn.data_blkaddr == NEW_ADDR) {
1376 zero_user_segment(page, 0, PAGE_SIZE);
1377 if (!PageUptodate(page))
1378 SetPageUptodate(page);
1380 f2fs_put_page(page, 1);
1382 /* if ipage exists, blkaddr should be NEW_ADDR */
1383 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1384 page = f2fs_get_lock_data_page(inode, index, true);
1389 if (new_i_size && i_size_read(inode) <
1390 ((loff_t)(index + 1) << PAGE_SHIFT))
1391 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1395 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1397 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1398 struct f2fs_summary sum;
1399 struct node_info ni;
1400 block_t old_blkaddr;
1404 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1407 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1411 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1412 if (dn->data_blkaddr != NULL_ADDR)
1415 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1419 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1420 old_blkaddr = dn->data_blkaddr;
1421 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1422 &sum, seg_type, NULL);
1423 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1424 invalidate_mapping_pages(META_MAPPING(sbi),
1425 old_blkaddr, old_blkaddr);
1426 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1429 * i_size will be updated by direct_IO. Otherwise, we'll get stale
1430 * data from an unwritten block via dio_read.
1435 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
1437 struct inode *inode = file_inode(iocb->ki_filp);
1438 struct f2fs_map_blocks map;
1441 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
1443 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1444 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1445 if (map.m_len > map.m_lblk)
1446 map.m_len -= map.m_lblk;
1450 map.m_next_pgofs = NULL;
1451 map.m_next_extent = NULL;
1452 map.m_seg_type = NO_CHECK_TYPE;
1453 map.m_may_create = true;
1456 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1457 flag = f2fs_force_buffered_io(inode, iocb, from) ?
1458 F2FS_GET_BLOCK_PRE_AIO :
1459 F2FS_GET_BLOCK_PRE_DIO;
1462 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1463 err = f2fs_convert_inline_inode(inode);
1467 if (f2fs_has_inline_data(inode))
1470 flag = F2FS_GET_BLOCK_PRE_AIO;
1473 err = f2fs_map_blocks(inode, &map, 1, flag);
1474 if (map.m_len > 0 && err == -ENOSPC) {
1476 set_inode_flag(inode, FI_NO_PREALLOC);
1482 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1484 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1486 down_read(&sbi->node_change);
1488 up_read(&sbi->node_change);
1493 f2fs_unlock_op(sbi);
1498 * f2fs_map_blocks() tries to find or build mapping relationship which
1499 * maps continuous logical blocks to physical blocks, and return such
1500 * info via f2fs_map_blocks structure.
1502 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1503 int create, int flag)
1505 unsigned int maxblocks = map->m_len;
1506 struct dnode_of_data dn;
1507 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1508 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1509 pgoff_t pgofs, end_offset, end;
1510 int err = 0, ofs = 1;
1511 unsigned int ofs_in_node, last_ofs_in_node;
1513 struct extent_info ei = {0,0,0};
1515 unsigned int start_pgofs;
1523 /* it only supports block size == page size */
1524 pgofs = (pgoff_t)map->m_lblk;
1525 end = pgofs + maxblocks;
1527 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1528 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1532 map->m_pblk = ei.blk + pgofs - ei.fofs;
1533 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1534 map->m_flags = F2FS_MAP_MAPPED;
1535 if (map->m_next_extent)
1536 *map->m_next_extent = pgofs + map->m_len;
1538 /* for hardware encryption, but also to avoid a potential issue in the future */
1539 if (flag == F2FS_GET_BLOCK_DIO)
1540 f2fs_wait_on_block_writeback_range(inode,
1541 map->m_pblk, map->m_len);
1546 if (map->m_may_create)
1547 f2fs_do_map_lock(sbi, flag, true);
1549 /* When reading holes, we need its node page */
1550 set_new_dnode(&dn, inode, NULL, NULL, 0);
1551 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1553 if (flag == F2FS_GET_BLOCK_BMAP)
1556 if (err == -ENOENT) {
1558 * There is one exceptional case where read_node_page()
1559 * may return -ENOENT because the filesystem has been
1560 * shut down or hit cp_error, so force the error
1561 * number to EIO in that case.
1563 if (map->m_may_create &&
1564 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1565 f2fs_cp_error(sbi))) {
1571 if (map->m_next_pgofs)
1572 *map->m_next_pgofs =
1573 f2fs_get_next_page_offset(&dn, pgofs);
1574 if (map->m_next_extent)
1575 *map->m_next_extent =
1576 f2fs_get_next_page_offset(&dn, pgofs);
1581 start_pgofs = pgofs;
1583 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1584 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1587 blkaddr = f2fs_data_blkaddr(&dn);
1589 if (__is_valid_data_blkaddr(blkaddr) &&
1590 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1591 err = -EFSCORRUPTED;
1595 if (__is_valid_data_blkaddr(blkaddr)) {
1596 /* use out-of-place update for direct IO under LFS mode */
1597 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1598 map->m_may_create) {
1599 err = __allocate_data_block(&dn, map->m_seg_type);
1602 blkaddr = dn.data_blkaddr;
1603 set_inode_flag(inode, FI_APPEND_WRITE);
1607 if (unlikely(f2fs_cp_error(sbi))) {
1611 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1612 if (blkaddr == NULL_ADDR) {
1614 last_ofs_in_node = dn.ofs_in_node;
1617 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1618 flag != F2FS_GET_BLOCK_DIO);
1619 err = __allocate_data_block(&dn,
1622 set_inode_flag(inode, FI_APPEND_WRITE);
1626 map->m_flags |= F2FS_MAP_NEW;
1627 blkaddr = dn.data_blkaddr;
1629 if (flag == F2FS_GET_BLOCK_BMAP) {
1633 if (flag == F2FS_GET_BLOCK_PRECACHE)
1635 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1636 blkaddr == NULL_ADDR) {
1637 if (map->m_next_pgofs)
1638 *map->m_next_pgofs = pgofs + 1;
1641 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1642 /* for defragment case */
1643 if (map->m_next_pgofs)
1644 *map->m_next_pgofs = pgofs + 1;
1650 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1653 if (map->m_len == 0) {
1654 /* preallocated unwritten block should be mapped for fiemap. */
1655 if (blkaddr == NEW_ADDR)
1656 map->m_flags |= F2FS_MAP_UNWRITTEN;
1657 map->m_flags |= F2FS_MAP_MAPPED;
1659 map->m_pblk = blkaddr;
1661 } else if ((map->m_pblk != NEW_ADDR &&
1662 blkaddr == (map->m_pblk + ofs)) ||
1663 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1664 flag == F2FS_GET_BLOCK_PRE_DIO) {
1675 /* preallocate blocks in batch for one dnode page */
1676 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1677 (pgofs == end || dn.ofs_in_node == end_offset)) {
1679 dn.ofs_in_node = ofs_in_node;
1680 err = f2fs_reserve_new_blocks(&dn, prealloc);
1684 map->m_len += dn.ofs_in_node - ofs_in_node;
1685 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1689 dn.ofs_in_node = end_offset;
1694 else if (dn.ofs_in_node < end_offset)
1697 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1698 if (map->m_flags & F2FS_MAP_MAPPED) {
1699 unsigned int ofs = start_pgofs - map->m_lblk;
1701 f2fs_update_extent_cache_range(&dn,
1702 start_pgofs, map->m_pblk + ofs,
1707 f2fs_put_dnode(&dn);
1709 if (map->m_may_create) {
1710 f2fs_do_map_lock(sbi, flag, false);
1711 f2fs_balance_fs(sbi, dn.node_changed);
1717 /* for hardware encryption, but also to avoid a potential issue in the future */
1718 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1719 f2fs_wait_on_block_writeback_range(inode,
1720 map->m_pblk, map->m_len);
1722 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1723 if (map->m_flags & F2FS_MAP_MAPPED) {
1724 unsigned int ofs = start_pgofs - map->m_lblk;
1726 f2fs_update_extent_cache_range(&dn,
1727 start_pgofs, map->m_pblk + ofs,
1730 if (map->m_next_extent)
1731 *map->m_next_extent = pgofs + 1;
1733 f2fs_put_dnode(&dn);
1735 if (map->m_may_create) {
1736 f2fs_do_map_lock(sbi, flag, false);
1737 f2fs_balance_fs(sbi, dn.node_changed);
1740 trace_f2fs_map_blocks(inode, map, err);
1744 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1746 struct f2fs_map_blocks map;
1750 if (pos + len > i_size_read(inode))
1753 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1754 map.m_next_pgofs = NULL;
1755 map.m_next_extent = NULL;
1756 map.m_seg_type = NO_CHECK_TYPE;
1757 map.m_may_create = false;
1758 last_lblk = F2FS_BLK_ALIGN(pos + len);
1760 while (map.m_lblk < last_lblk) {
1761 map.m_len = last_lblk - map.m_lblk;
1762 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1763 if (err || map.m_len == 0)
1765 map.m_lblk += map.m_len;
1770 static int __get_data_block(struct inode *inode, sector_t iblock,
1771 struct buffer_head *bh, int create, int flag,
1772 pgoff_t *next_pgofs, int seg_type, bool may_write)
1774 struct f2fs_map_blocks map;
1777 map.m_lblk = iblock;
1778 map.m_len = bh->b_size >> inode->i_blkbits;
1779 map.m_next_pgofs = next_pgofs;
1780 map.m_next_extent = NULL;
1781 map.m_seg_type = seg_type;
1782 map.m_may_create = may_write;
1784 err = f2fs_map_blocks(inode, &map, create, flag);
1786 map_bh(bh, inode->i_sb, map.m_pblk);
1787 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1788 bh->b_size = (u64)map.m_len << inode->i_blkbits;
1793 static int get_data_block(struct inode *inode, sector_t iblock,
1794 struct buffer_head *bh_result, int create, int flag,
1795 pgoff_t *next_pgofs)
1797 return __get_data_block(inode, iblock, bh_result, create,
1799 NO_CHECK_TYPE, create);
1802 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1803 struct buffer_head *bh_result, int create)
1805 return __get_data_block(inode, iblock, bh_result, create,
1806 F2FS_GET_BLOCK_DIO, NULL,
1807 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1808 IS_SWAPFILE(inode) ? false : true);
1811 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1812 struct buffer_head *bh_result, int create)
1814 return __get_data_block(inode, iblock, bh_result, create,
1815 F2FS_GET_BLOCK_DIO, NULL,
1816 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1820 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1821 struct buffer_head *bh_result, int create)
1823 return __get_data_block(inode, iblock, bh_result, create,
1824 F2FS_GET_BLOCK_BMAP, NULL,
1825 NO_CHECK_TYPE, create);
1828 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1830 return (offset >> inode->i_blkbits);
1833 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1835 return (blk << inode->i_blkbits);
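/*
 * Editorial example: with the default 4 KiB block size (i_blkbits == 12),
 * logical_to_blk(inode, 8192) == 2 and blk_to_logical(inode, 2) == 8192.
 */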
1838 static int f2fs_xattr_fiemap(struct inode *inode,
1839 struct fiemap_extent_info *fieinfo)
1841 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1843 struct node_info ni;
1844 __u64 phys = 0, len;
1846 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1849 if (f2fs_has_inline_xattr(inode)) {
1852 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1853 inode->i_ino, false);
1857 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1859 f2fs_put_page(page, 1);
1863 phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1864 offset = offsetof(struct f2fs_inode, i_addr) +
1865 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1866 get_inline_xattr_addrs(inode));
1869 len = inline_xattr_size(inode);
1871 f2fs_put_page(page, 1);
1873 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1876 flags |= FIEMAP_EXTENT_LAST;
1878 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1879 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1880 if (err || err == 1)
1885 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1889 err = f2fs_get_node_info(sbi, xnid, &ni);
1891 f2fs_put_page(page, 1);
1895 phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1896 len = inode->i_sb->s_blocksize;
1898 f2fs_put_page(page, 1);
1900 flags = FIEMAP_EXTENT_LAST;
1904 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1905 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1908 return (err < 0 ? err : 0);
1911 static loff_t max_inode_blocks(struct inode *inode)
1913 loff_t result = ADDRS_PER_INODE(inode);
1914 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1916 /* two direct node blocks */
1917 result += (leaf_count * 2);
1919 /* two indirect node blocks */
1920 leaf_count *= NIDS_PER_BLOCK;
1921 result += (leaf_count * 2);
1923 /* one double indirect node block */
1924 leaf_count *= NIDS_PER_BLOCK;
1925 result += leaf_count;
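/*
 * Editorial example: assuming the common layout of 923 addresses in the
 * inode and 1018 entries per node block, this evaluates to
 * 923 + 2*1018 + 2*1018^2 + 1018^3, roughly 1.06 billion blocks or about
 * 3.94 TiB with 4 KiB blocks; the exact figures shrink when inline xattrs
 * or extra attributes consume inode address slots.
 */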
1930 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1933 struct buffer_head map_bh;
1934 sector_t start_blk, last_blk;
1936 u64 logical = 0, phys = 0, size = 0;
1939 bool compr_cluster = false;
1940 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1942 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1943 ret = f2fs_precache_extents(inode);
1948 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1954 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1955 ret = f2fs_xattr_fiemap(inode, fieinfo);
1959 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1960 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1965 if (logical_to_blk(inode, len) == 0)
1966 len = blk_to_logical(inode, 1);
1968 start_blk = logical_to_blk(inode, start);
1969 last_blk = logical_to_blk(inode, start + len - 1);
1972 memset(&map_bh, 0, sizeof(struct buffer_head));
1973 map_bh.b_size = len;
1976 map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
1978 ret = get_data_block(inode, start_blk, &map_bh, 0,
1979 F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1984 if (!buffer_mapped(&map_bh)) {
1985 start_blk = next_pgofs;
1987 if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1988 max_inode_blocks(inode)))
1991 flags |= FIEMAP_EXTENT_LAST;
1995 if (IS_ENCRYPTED(inode))
1996 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1998 ret = fiemap_fill_next_extent(fieinfo, logical,
2000 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2006 if (start_blk > last_blk)
2009 if (compr_cluster) {
2010 compr_cluster = false;
2013 logical = blk_to_logical(inode, start_blk - 1);
2014 phys = blk_to_logical(inode, map_bh.b_blocknr);
2015 size = blk_to_logical(inode, cluster_size);
2017 flags |= FIEMAP_EXTENT_ENCODED;
2019 start_blk += cluster_size - 1;
2021 if (start_blk > last_blk)
2027 if (map_bh.b_blocknr == COMPRESS_ADDR) {
2028 compr_cluster = true;
2033 logical = blk_to_logical(inode, start_blk);
2034 phys = blk_to_logical(inode, map_bh.b_blocknr);
2035 size = map_bh.b_size;
2037 if (buffer_unwritten(&map_bh))
2038 flags = FIEMAP_EXTENT_UNWRITTEN;
2040 start_blk += logical_to_blk(inode, size);
2044 if (fatal_signal_pending(current))
2052 inode_unlock(inode);
2056 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2058 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2059 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2060 return inode->i_sb->s_maxbytes;
2062 return i_size_read(inode);
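/*
 * Editorial note: while fs-verity is enabled or being built, reads are
 * allowed past i_size (up to s_maxbytes) because the Merkle tree pages are
 * kept beyond EOF in the same address space; ordinary files remain limited
 * to i_size.
 */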
2065 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2067 struct f2fs_map_blocks *map,
2068 struct bio **bio_ret,
2069 sector_t *last_block_in_bio,
2072 struct bio *bio = *bio_ret;
2073 const unsigned blkbits = inode->i_blkbits;
2074 const unsigned blocksize = 1 << blkbits;
2075 sector_t block_in_file;
2076 sector_t last_block;
2077 sector_t last_block_in_file;
2081 block_in_file = (sector_t)page_index(page);
2082 last_block = block_in_file + nr_pages;
2083 last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
2085 if (last_block > last_block_in_file)
2086 last_block = last_block_in_file;
2088 /* just zeroing out page which is beyond EOF */
2089 if (block_in_file >= last_block)
2092 * Map blocks using the previous result first.
2094 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2095 block_in_file > map->m_lblk &&
2096 block_in_file < (map->m_lblk + map->m_len))
2100 * Then do more f2fs_map_blocks() calls until we are
2101 * done with this page.
2103 map->m_lblk = block_in_file;
2104 map->m_len = last_block - block_in_file;
2106 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2110 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2111 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2112 SetPageMappedToDisk(page);
2114 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2115 !cleancache_get_page(page))) {
2116 SetPageUptodate(page);
2120 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2121 DATA_GENERIC_ENHANCE_READ)) {
2122 ret = -EFSCORRUPTED;
2127 zero_user_segment(page, 0, PAGE_SIZE);
2128 if (f2fs_need_verity(inode, page->index) &&
2129 !fsverity_verify_page(page)) {
2133 if (!PageUptodate(page))
2134 SetPageUptodate(page);
2140 * This page will go to BIO. Do we need to send this BIO off first?
2143 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2144 *last_block_in_bio, block_nr) ||
2145 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2147 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2151 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2152 is_readahead ? REQ_RAHEAD : 0, page->index,
2162 * If the page is under writeback, we need to wait for
2163 * its completion to see the correct decrypted data.
2165 f2fs_wait_on_block_writeback(inode, block_nr);
2167 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2168 goto submit_and_realloc;
2170 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2171 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2172 ClearPageError(page);
2173 *last_block_in_bio = block_nr;
2177 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2186 #ifdef CONFIG_F2FS_FS_COMPRESSION
2187 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2188 unsigned nr_pages, sector_t *last_block_in_bio,
2189 bool is_readahead, bool for_write)
2191 struct dnode_of_data dn;
2192 struct inode *inode = cc->inode;
2193 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2194 struct bio *bio = *bio_ret;
2195 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2196 sector_t last_block_in_file;
2197 const unsigned blkbits = inode->i_blkbits;
2198 const unsigned blocksize = 1 << blkbits;
2199 struct decompress_io_ctx *dic = NULL;
2200 struct bio_post_read_ctx *ctx;
2201 bool for_verity = false;
2205 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2207 last_block_in_file = (f2fs_readpage_limit(inode) +
2208 blocksize - 1) >> blkbits;
2210 /* get rid of pages beyond EOF */
2211 for (i = 0; i < cc->cluster_size; i++) {
2212 struct page *page = cc->rpages[i];
2216 if ((sector_t)page->index >= last_block_in_file) {
2217 zero_user_segment(page, 0, PAGE_SIZE);
2218 if (!PageUptodate(page))
2219 SetPageUptodate(page);
2220 } else if (!PageUptodate(page)) {
2226 cc->rpages[i] = NULL;
2230 /* we are done since all pages are beyond EOF */
2231 if (f2fs_cluster_is_empty(cc))
2234 set_new_dnode(&dn, inode, NULL, NULL, 0);
2235 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2239 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2241 for (i = 1; i < cc->cluster_size; i++) {
2244 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2245 dn.ofs_in_node + i);
2247 if (!__is_valid_data_blkaddr(blkaddr))
2250 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2257 /* nothing to decompress */
2258 if (cc->nr_cpages == 0) {
2263 dic = f2fs_alloc_dic(cc);
2270 * It's possible to enable fsverity on the fly when handling a cluster,
2271 * which requires complicated error handling. Instead of adding more
2272 * complexity, let's give a rule where end_io post-processes fsverity
2273 * per cluster. In order to do that, we need to submit bio, if previous
2274 * bio sets a different post-process policy.
2276 if (fsverity_active(cc->inode)) {
2277 atomic_set(&dic->verity_pages, cc->nr_cpages);
2281 ctx = bio->bi_private;
2282 if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
2283 __submit_bio(sbi, bio, DATA);
2289 for (i = 0; i < dic->nr_cpages; i++) {
2290 struct page *page = dic->cpages[i];
2293 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2294 dn.ofs_in_node + i + 1);
2296 if (bio && (!page_is_mergeable(sbi, bio,
2297 *last_block_in_bio, blkaddr) ||
2298 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2300 __submit_bio(sbi, bio, DATA);
2305 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2306 is_readahead ? REQ_RAHEAD : 0,
2307 page->index, for_write, for_verity);
2309 unsigned int remained = dic->nr_cpages - i;
2310 bool release = false;
2316 if (!atomic_sub_return(remained,
2317 &dic->verity_pages))
2320 if (!atomic_sub_return(remained,
2321 &dic->pending_pages))
2326 f2fs_decompress_end_io(dic->rpages,
2327 cc->cluster_size, true,
2332 f2fs_put_dnode(&dn);
2338 f2fs_wait_on_block_writeback(inode, blkaddr);
2340 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2341 goto submit_and_realloc;
2343 /* tag STEP_DECOMPRESS to handle IO in wq */
2344 ctx = bio->bi_private;
2345 if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
2346 ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
2348 inc_page_count(sbi, F2FS_RD_DATA);
2349 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2350 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2351 ClearPageError(page);
2352 *last_block_in_bio = blkaddr;
2355 f2fs_put_dnode(&dn);
2361 f2fs_put_dnode(&dn);
2363 f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
2370 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2371 * The major change from the original is that f2fs uses block_size == page_size by default.
2373 * Note that the aops->readpages() function is ONLY used for read-ahead. If
2374 * this function ever deviates from doing just read-ahead, it should either
2375 * use ->readpage() or do the necessary surgery to decouple ->readpages()
2378 static int f2fs_mpage_readpages(struct inode *inode,
2379 struct readahead_control *rac, struct page *page)
2381 struct bio *bio = NULL;
2382 sector_t last_block_in_bio = 0;
2383 struct f2fs_map_blocks map;
2384 #ifdef CONFIG_F2FS_FS_COMPRESSION
2385 struct compress_ctx cc = {
2387 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2388 .cluster_size = F2FS_I(inode)->i_cluster_size,
2389 .cluster_idx = NULL_CLUSTER,
2396 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2397 unsigned max_nr_pages = nr_pages;
2399 bool drop_ra = false;
2405 map.m_next_pgofs = NULL;
2406 map.m_next_extent = NULL;
2407 map.m_seg_type = NO_CHECK_TYPE;
2408 map.m_may_create = false;
2411 * Two readahead threads for the same address range can race with each other,
2412 * which fragments sequential read IOs, so make them avoid each other.
2414 if (rac && readahead_count(rac)) {
2415 if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
2418 WRITE_ONCE(F2FS_I(inode)->ra_offset,
2419 readahead_index(rac));
2422 for (; nr_pages; nr_pages--) {
2424 page = readahead_page(rac);
2425 prefetchw(&page->flags);
2427 f2fs_put_page(page, 1);
2432 #ifdef CONFIG_F2FS_FS_COMPRESSION
2433 if (f2fs_compressed_file(inode)) {
2434 /* there are remaining compressed pages, submit them */
2435 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2436 ret = f2fs_read_multi_pages(&cc, &bio,
2439 rac != NULL, false);
2440 f2fs_destroy_compress_ctx(&cc, false);
2442 goto set_error_page;
2444 ret = f2fs_is_compressed_cluster(inode, page->index);
2446 goto set_error_page;
2448 goto read_single_page;
2450 ret = f2fs_init_compress_ctx(&cc);
2452 goto set_error_page;
2454 f2fs_compress_ctx_add_page(&cc, page);
2461 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2462 &bio, &last_block_in_bio, rac);
2464 #ifdef CONFIG_F2FS_FS_COMPRESSION
2468 zero_user_segment(page, 0, PAGE_SIZE);
2471 #ifdef CONFIG_F2FS_FS_COMPRESSION
2477 #ifdef CONFIG_F2FS_FS_COMPRESSION
2478 if (f2fs_compressed_file(inode)) {
2480 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2481 ret = f2fs_read_multi_pages(&cc, &bio,
2484 rac != NULL, false);
2485 f2fs_destroy_compress_ctx(&cc, false);
2491 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2493 if (rac && readahead_count(rac) && !drop_ra)
2494 WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
2498 static int f2fs_read_data_page(struct file *file, struct page *page)
2500 struct inode *inode = page_file_mapping(page)->host;
2503 trace_f2fs_readpage(page, DATA);
2505 if (!f2fs_is_compress_backend_ready(inode)) {
2510 /* If the file has inline data, try to read it directly */
2511 if (f2fs_has_inline_data(inode))
2512 ret = f2fs_read_inline_data(inode, page);
2514 ret = f2fs_mpage_readpages(inode, NULL, page);
2518 static void f2fs_readahead(struct readahead_control *rac)
2520 struct inode *inode = rac->mapping->host;
2522 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2524 if (!f2fs_is_compress_backend_ready(inode))
2527 /* If the file has inline data, skip readpages */
2528 if (f2fs_has_inline_data(inode))
2531 f2fs_mpage_readpages(inode, rac, NULL);
2534 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2536 struct inode *inode = fio->page->mapping->host;
2537 struct page *mpage, *page;
2538 gfp_t gfp_flags = GFP_NOFS;
2540 if (!f2fs_encrypted_file(inode))
2543 page = fio->compressed_page ? fio->compressed_page : fio->page;
2545 /* wait for GCed page writeback via META_MAPPING */
2546 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2548 if (fscrypt_inode_uses_inline_crypto(inode))
2552 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2553 PAGE_SIZE, 0, gfp_flags);
2554 if (IS_ERR(fio->encrypted_page)) {
2555 /* flush pending IOs and wait for a while in the ENOMEM case */
2556 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2557 f2fs_flush_merged_writes(fio->sbi);
2558 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2559 gfp_flags |= __GFP_NOFAIL;
2562 return PTR_ERR(fio->encrypted_page);
2565 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2567 if (PageUptodate(mpage))
2568 memcpy(page_address(mpage),
2569 page_address(fio->encrypted_page), PAGE_SIZE);
2570 f2fs_put_page(mpage, 1);
2575 static inline bool check_inplace_update_policy(struct inode *inode,
2576 struct f2fs_io_info *fio)
2578 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2579 unsigned int policy = SM_I(sbi)->ipu_policy;
2581 if (policy & (0x1 << F2FS_IPU_FORCE))
2583 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2585 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2586 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2588 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2589 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2593 * IPU for rewriting async pages
2595 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2596 fio && fio->op == REQ_OP_WRITE &&
2597 !(fio->op_flags & REQ_SYNC) &&
2598 !IS_ENCRYPTED(inode))
2601 /* this is only set during fdatasync */
2602 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2603 is_inode_flag_set(inode, FI_NEED_IPU))
2606 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2607 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2613 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2615 if (f2fs_is_pinned_file(inode))
2618 /* if this is a cold file, we should overwrite to avoid fragmentation */
2619 if (file_is_cold(inode))
2622 return check_inplace_update_policy(inode, fio);
2625 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2627 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2629 if (f2fs_lfs_mode(sbi))
2631 if (S_ISDIR(inode->i_mode))
2633 if (IS_NOQUOTA(inode))
2635 if (f2fs_is_atomic_file(inode))
2638 if (is_cold_data(fio->page))
2640 if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2642 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2643 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2649 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2651 struct inode *inode = fio->page->mapping->host;
2653 if (f2fs_should_update_outplace(inode, fio))
2656 return f2fs_should_update_inplace(inode, fio);
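/*
 * Write one data page either in place (IPU) at its existing block address
 * or out of place (OPU) to a newly allocated block.  The extent cache is
 * consulted first so a clear IPU case can skip the dnode lookup.
 */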
2659 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2661 struct page *page = fio->page;
2662 struct inode *inode = page->mapping->host;
2663 struct dnode_of_data dn;
2664 struct extent_info ei = {0, 0, 0};
2665 struct node_info ni;
2666 bool ipu_force = false;
2669 set_new_dnode(&dn, inode, NULL, NULL, 0);
2670 if (need_inplace_update(fio) &&
2671 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2672 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2674 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2675 DATA_GENERIC_ENHANCE))
2676 return -EFSCORRUPTED;
2679 fio->need_lock = LOCK_DONE;
2683 /* Avoid deadlock between the page lock and f2fs_lock_op */
2684 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2687 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2691 fio->old_blkaddr = dn.data_blkaddr;
2693 /* This page is already truncated */
2694 if (fio->old_blkaddr == NULL_ADDR) {
2695 ClearPageUptodate(page);
2696 clear_cold_data(page);
2700 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2701 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2702 DATA_GENERIC_ENHANCE)) {
2703 err = -EFSCORRUPTED;
2707 * If the current allocation needs SSR,
2708 * it is better to do in-place writes for the updated data.
2711 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2712 need_inplace_update(fio))) {
2713 err = f2fs_encrypt_one_page(fio);
2717 set_page_writeback(page);
2718 ClearPageError(page);
2719 f2fs_put_dnode(&dn);
2720 if (fio->need_lock == LOCK_REQ)
2721 f2fs_unlock_op(fio->sbi);
2722 err = f2fs_inplace_write_data(fio);
2724 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2725 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2726 if (PageWriteback(page))
2727 end_page_writeback(page);
2729 set_inode_flag(inode, FI_UPDATE_WRITE);
2731 trace_f2fs_do_write_data_page(fio->page, IPU);
2735 if (fio->need_lock == LOCK_RETRY) {
2736 if (!f2fs_trylock_op(fio->sbi)) {
2740 fio->need_lock = LOCK_REQ;
2743 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2747 fio->version = ni.version;
2749 err = f2fs_encrypt_one_page(fio);
2753 set_page_writeback(page);
2754 ClearPageError(page);
2756 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2757 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2759 /* LFS mode write path */
2760 f2fs_outplace_write_data(&dn, fio);
2761 trace_f2fs_do_write_data_page(page, OPU);
2762 set_inode_flag(inode, FI_APPEND_WRITE);
2763 if (page->index == 0)
2764 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2766 f2fs_put_dnode(&dn);
2768 if (fio->need_lock == LOCK_REQ)
2769 f2fs_unlock_op(fio->sbi);
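/*
 * Write back a single dirty data page: pages beyond i_size are zeroed or
 * skipped, dentry/quota pages are written under checkpoint control, inline
 * data is tried first, and everything else funnels into
 * f2fs_do_write_data_page() with lock retry/upgrade handling.
 */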
2773 int f2fs_write_single_data_page(struct page *page, int *submitted,
2775 sector_t *last_block,
2776 struct writeback_control *wbc,
2777 enum iostat_type io_type,
2781 struct inode *inode = page->mapping->host;
2782 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2783 loff_t i_size = i_size_read(inode);
2784 const pgoff_t end_index = ((unsigned long long)i_size)
2786 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2787 unsigned offset = 0;
2788 bool need_balance_fs = false;
2790 struct f2fs_io_info fio = {
2792 .ino = inode->i_ino,
2795 .op_flags = wbc_to_write_flags(wbc),
2796 .old_blkaddr = NULL_ADDR,
2798 .encrypted_page = NULL,
2800 .compr_blocks = compr_blocks,
2801 .need_lock = LOCK_RETRY,
2805 .last_block = last_block,
2808 trace_f2fs_writepage(page, DATA);
2810 /* we should bypass data pages to let the kworker jobs proceed */
2811 if (unlikely(f2fs_cp_error(sbi))) {
2812 mapping_set_error(page->mapping, -EIO);
2814 * don't drop any dirty dentry pages, to keep the latest
2815 * directory structure.
2817 if (S_ISDIR(inode->i_mode) &&
2818 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2823 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2826 if (page->index < end_index ||
2827 f2fs_verity_in_progress(inode) ||
2832 * If the offset is out-of-range of file size,
2833 * this page does not have to be written to disk.
2835 offset = i_size & (PAGE_SIZE - 1);
2836 if ((page->index >= end_index + 1) || !offset)
2839 zero_user_segment(page, offset, PAGE_SIZE);
2841 if (f2fs_is_drop_cache(inode))
2843 /* we should not write the 0'th page, which holds the journal header */
2844 if (f2fs_is_volatile_file(inode) && (!page->index ||
2845 (!wbc->for_reclaim &&
2846 f2fs_available_free_memory(sbi, BASE_CHECK))))
2849 /* Dentry/quota blocks are controlled by checkpoint */
2850 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2852 * We need to wait for node_write to avoid block allocation during
2853 * checkpoint. This can only happen for quota writes, which can cause
2854 * the discard race condition described below.
2856 if (IS_NOQUOTA(inode))
2857 down_read(&sbi->node_write);
2859 fio.need_lock = LOCK_DONE;
2860 err = f2fs_do_write_data_page(&fio);
2862 if (IS_NOQUOTA(inode))
2863 up_read(&sbi->node_write);
2868 if (!wbc->for_reclaim)
2869 need_balance_fs = true;
2870 else if (has_not_enough_free_secs(sbi, 0, 0))
2873 set_inode_flag(inode, FI_HOT_DATA);
2876 if (f2fs_has_inline_data(inode)) {
2877 err = f2fs_write_inline_data(inode, page);
2882 if (err == -EAGAIN) {
2883 err = f2fs_do_write_data_page(&fio);
2884 if (err == -EAGAIN) {
2885 fio.need_lock = LOCK_REQ;
2886 err = f2fs_do_write_data_page(&fio);
2891 file_set_keep_isize(inode);
2893 spin_lock(&F2FS_I(inode)->i_size_lock);
2894 if (F2FS_I(inode)->last_disk_size < psize)
2895 F2FS_I(inode)->last_disk_size = psize;
2896 spin_unlock(&F2FS_I(inode)->i_size_lock);
2900 if (err && err != -ENOENT)
2904 inode_dec_dirty_pages(inode);
2906 ClearPageUptodate(page);
2907 clear_cold_data(page);
2910 if (wbc->for_reclaim) {
2911 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2912 clear_inode_flag(inode, FI_HOT_DATA);
2913 f2fs_remove_dirty_inode(inode);
2917 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2918 !F2FS_I(inode)->wb_task && allow_balance)
2919 f2fs_balance_fs(sbi, need_balance_fs);
2921 if (unlikely(f2fs_cp_error(sbi))) {
2922 f2fs_submit_merged_write(sbi, DATA);
2924 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2929 *submitted = fio.submitted ? 1 : 0;
2934 redirty_page_for_writepage(wbc, page);
2936 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2937 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2938 * file_write_and_wait_range() will then see the EIO error, which is critical
2939 * for the return value of fsync(), so an atomic_write failure is reported to the user.
2941 if (!err || wbc->for_reclaim)
2942 return AOP_WRITEPAGE_ACTIVATE;
2947 static int f2fs_write_data_page(struct page *page,
2948 struct writeback_control *wbc)
2950 #ifdef CONFIG_F2FS_FS_COMPRESSION
2951 struct inode *inode = page->mapping->host;
2953 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2956 if (f2fs_compressed_file(inode)) {
2957 if (f2fs_is_compressed_cluster(inode, page->index)) {
2958 redirty_page_for_writepage(wbc, page);
2959 return AOP_WRITEPAGE_ACTIVATE;
2965 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2966 wbc, FS_DATA_IO, 0, true);
2970 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2971 * The major change is that cold data pages are written in a separate step
2972 * from warm/hot data pages.
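 * Compressed inodes are handled here as well: pages of a cluster are
 * collected into a compress_ctx and flushed via f2fs_write_multi_pages().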
2974 static int f2fs_write_cache_pages(struct address_space *mapping,
2975 struct writeback_control *wbc,
2976 enum iostat_type io_type)
2979 int done = 0, retry = 0;
2980 struct pagevec pvec;
2981 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2982 struct bio *bio = NULL;
2983 sector_t last_block;
2984 #ifdef CONFIG_F2FS_FS_COMPRESSION
2985 struct inode *inode = mapping->host;
2986 struct compress_ctx cc = {
2988 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2989 .cluster_size = F2FS_I(inode)->i_cluster_size,
2990 .cluster_idx = NULL_CLUSTER,
2996 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
3002 pgoff_t end; /* Inclusive */
3004 int range_whole = 0;
3010 pagevec_init(&pvec);
3012 if (get_dirty_pages(mapping->host) <=
3013 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3014 set_inode_flag(mapping->host, FI_HOT_DATA);
3016 clear_inode_flag(mapping->host, FI_HOT_DATA);
3018 if (wbc->range_cyclic) {
3019 index = mapping->writeback_index; /* prev offset */
3022 index = wbc->range_start >> PAGE_SHIFT;
3023 end = wbc->range_end >> PAGE_SHIFT;
3024 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3027 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3028 tag = PAGECACHE_TAG_TOWRITE;
3030 tag = PAGECACHE_TAG_DIRTY;
3033 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3034 tag_pages_for_writeback(mapping, index, end);
3036 while (!done && !retry && (index <= end)) {
3037 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3042 for (i = 0; i < nr_pages; i++) {
3043 struct page *page = pvec.pages[i];
3047 #ifdef CONFIG_F2FS_FS_COMPRESSION
3048 if (f2fs_compressed_file(inode)) {
3049 ret = f2fs_init_compress_ctx(&cc);
3055 if (!f2fs_cluster_can_merge_page(&cc,
3057 ret = f2fs_write_multi_pages(&cc,
3058 &submitted, wbc, io_type);
3064 if (unlikely(f2fs_cp_error(sbi)))
3067 if (f2fs_cluster_is_empty(&cc)) {
3068 void *fsdata = NULL;
3072 ret2 = f2fs_prepare_compress_overwrite(
3074 page->index, &fsdata);
3080 !f2fs_compress_write_end(inode,
3081 fsdata, page->index,
3091 /* give priority to WB_SYNC threads */
3092 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3093 wbc->sync_mode == WB_SYNC_NONE) {
3097 #ifdef CONFIG_F2FS_FS_COMPRESSION
3100 done_index = page->index;
3104 if (unlikely(page->mapping != mapping)) {
3110 if (!PageDirty(page)) {
3111 /* someone wrote it for us */
3112 goto continue_unlock;
3115 if (PageWriteback(page)) {
3116 if (wbc->sync_mode != WB_SYNC_NONE)
3117 f2fs_wait_on_page_writeback(page,
3120 goto continue_unlock;
3123 if (!clear_page_dirty_for_io(page))
3124 goto continue_unlock;
3126 #ifdef CONFIG_F2FS_FS_COMPRESSION
3127 if (f2fs_compressed_file(inode)) {
3129 f2fs_compress_ctx_add_page(&cc, page);
3133 ret = f2fs_write_single_data_page(page, &submitted,
3134 &bio, &last_block, wbc, io_type,
3136 if (ret == AOP_WRITEPAGE_ACTIVATE)
3138 #ifdef CONFIG_F2FS_FS_COMPRESSION
3141 nwritten += submitted;
3142 wbc->nr_to_write -= submitted;
3144 if (unlikely(ret)) {
3146 * keep nr_to_write, since vfs uses this to
3147 * get # of written pages.
3149 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3152 } else if (ret == -EAGAIN) {
3154 if (wbc->sync_mode == WB_SYNC_ALL) {
3156 congestion_wait(BLK_RW_ASYNC,
3157 DEFAULT_IO_TIMEOUT);
3162 done_index = page->index + 1;
3167 if (wbc->nr_to_write <= 0 &&
3168 wbc->sync_mode == WB_SYNC_NONE) {
3176 pagevec_release(&pvec);
3179 #ifdef CONFIG_F2FS_FS_COMPRESSION
3180 /* flush remaining pages in the compress cluster */
3181 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3182 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3183 nwritten += submitted;
3184 wbc->nr_to_write -= submitted;
3190 if (f2fs_compressed_file(inode))
3191 f2fs_destroy_compress_ctx(&cc, false);
3198 if (wbc->range_cyclic && !done)
3200 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3201 mapping->writeback_index = done_index;
3204 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3206 /* submit cached bio of IPU write */
3208 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
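/*
 * Large WB_SYNC_ALL flushes of regular files are serialized through
 * sbi->writepages (see __f2fs_write_data_pages()) so big sequential
 * streams are not interleaved with each other.
 */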
3213 static inline bool __should_serialize_io(struct inode *inode,
3214 struct writeback_control *wbc)
3216 /* to avoid deadlock in the data flush path */
3217 if (F2FS_I(inode)->wb_task)
3220 if (!S_ISREG(inode->i_mode))
3222 if (IS_NOQUOTA(inode))
3225 if (f2fs_compressed_file(inode))
3227 if (wbc->sync_mode != WB_SYNC_ALL)
3229 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
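/*
 * ->writepages worker: WB_SYNC_ALL callers bump wb_sync_req[DATA] so that
 * concurrent WB_SYNC_NONE writeback backs off, and small dirty sets on
 * dir/quota inodes are skipped here and left for the checkpoint to flush.
 */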
3234 static int __f2fs_write_data_pages(struct address_space *mapping,
3235 struct writeback_control *wbc,
3236 enum iostat_type io_type)
3238 struct inode *inode = mapping->host;
3239 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3240 struct blk_plug plug;
3242 bool locked = false;
3244 /* deal with chardevs and other special files */
3245 if (!mapping->a_ops->writepage)
3248 /* skip writing if there is no dirty page in this inode */
3249 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3252 /* during POR, we don't need to trigger writepage at all. */
3253 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3256 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3257 wbc->sync_mode == WB_SYNC_NONE &&
3258 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3259 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3262 /* skip writing during file defragmentation */
3263 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3266 trace_f2fs_writepages(mapping->host, wbc, DATA);
3268 /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3269 if (wbc->sync_mode == WB_SYNC_ALL)
3270 atomic_inc(&sbi->wb_sync_req[DATA]);
3271 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3272 /* to avoid potential deadlock */
3274 blk_finish_plug(current->plug);
3278 if (__should_serialize_io(inode, wbc)) {
3279 mutex_lock(&sbi->writepages);
3283 blk_start_plug(&plug);
3284 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3285 blk_finish_plug(&plug);
3288 mutex_unlock(&sbi->writepages);
3290 if (wbc->sync_mode == WB_SYNC_ALL)
3291 atomic_dec(&sbi->wb_sync_req[DATA]);
3293 * if some pages were truncated, we cannot count on mapping->host
3294 * to detect pending bios.
3297 f2fs_remove_dirty_inode(inode);
3301 wbc->pages_skipped += get_dirty_pages(inode);
3302 trace_f2fs_writepages(mapping->host, wbc, DATA);
3306 static int f2fs_write_data_pages(struct address_space *mapping,
3307 struct writeback_control *wbc)
3309 struct inode *inode = mapping->host;
3311 return __f2fs_write_data_pages(mapping, wbc,
3312 F2FS_I(inode)->cp_task == current ?
3313 FS_CP_DATA_IO : FS_DATA_IO);
3316 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3318 struct inode *inode = mapping->host;
3319 loff_t i_size = i_size_read(inode);
3321 if (IS_NOQUOTA(inode))
3324 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3325 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3326 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3327 down_write(&F2FS_I(inode)->i_mmap_sem);
3329 truncate_pagecache(inode, i_size);
3330 f2fs_truncate_blocks(inode, i_size, true);
3332 up_write(&F2FS_I(inode)->i_mmap_sem);
3333 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
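/*
 * Resolve the block address backing the page targeted by write_begin.
 * The map lock (PRE_AIO or DEFAULT) is taken when the inode still holds
 * inline data or the write starts at/after i_size; otherwise the extent
 * cache or a plain dnode lookup is enough.
 */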
3337 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3338 struct page *page, loff_t pos, unsigned len,
3339 block_t *blk_addr, bool *node_changed)
3341 struct inode *inode = page->mapping->host;
3342 pgoff_t index = page->index;
3343 struct dnode_of_data dn;
3345 bool locked = false;
3346 struct extent_info ei = {0, 0, 0};
3351 * we already allocated all the blocks, so we don't need to get
3352 * the block addresses when there is no need to fill the page.
3354 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3355 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3356 !f2fs_verity_in_progress(inode))
3359 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3360 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3361 flag = F2FS_GET_BLOCK_DEFAULT;
3363 flag = F2FS_GET_BLOCK_PRE_AIO;
3365 if (f2fs_has_inline_data(inode) ||
3366 (pos & PAGE_MASK) >= i_size_read(inode)) {
3367 f2fs_do_map_lock(sbi, flag, true);
3372 /* check inline_data */
3373 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3374 if (IS_ERR(ipage)) {
3375 err = PTR_ERR(ipage);
3379 set_new_dnode(&dn, inode, ipage, ipage, 0);
3381 if (f2fs_has_inline_data(inode)) {
3382 if (pos + len <= MAX_INLINE_DATA(inode)) {
3383 f2fs_do_read_inline_data(page, ipage);
3384 set_inode_flag(inode, FI_DATA_EXIST);
3386 set_inline_node(ipage);
3388 err = f2fs_convert_inline_page(&dn, page);
3391 if (dn.data_blkaddr == NULL_ADDR)
3392 err = f2fs_get_block(&dn, index);
3394 } else if (locked) {
3395 err = f2fs_get_block(&dn, index);
3397 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3398 dn.data_blkaddr = ei.blk + index - ei.fofs;
3401 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3402 if (err || dn.data_blkaddr == NULL_ADDR) {
3403 f2fs_put_dnode(&dn);
3404 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3406 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3413 /* convert_inline_page can make node_changed */
3414 *blk_addr = dn.data_blkaddr;
3415 *node_changed = dn.node_changed;
3417 f2fs_put_dnode(&dn);
3420 f2fs_do_map_lock(sbi, flag, false);
3424 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3425 loff_t pos, unsigned len, unsigned flags,
3426 struct page **pagep, void **fsdata)
3428 struct inode *inode = mapping->host;
3429 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3430 struct page *page = NULL;
3431 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3432 bool need_balance = false, drop_atomic = false;
3433 block_t blkaddr = NULL_ADDR;
3436 trace_f2fs_write_begin(inode, pos, len, flags);
3438 if (!f2fs_is_checkpoint_ready(sbi)) {
3443 if ((f2fs_is_atomic_file(inode) &&
3444 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3445 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3452 * We should check this at this point to avoid deadlock on the inode page
3453 * and #0 page. The locking rule for inline_data conversion should be:
3454 * lock_page(page #0) -> lock_page(inode_page)
3457 err = f2fs_convert_inline_inode(inode);
3462 #ifdef CONFIG_F2FS_FS_COMPRESSION
3463 if (f2fs_compressed_file(inode)) {
3468 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3471 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3484 * Do not use grab_cache_page_write_begin() to avoid a deadlock due to
3485 * wait_for_stable_page. We will wait for it below under our own IO control.
3487 page = f2fs_pagecache_get_page(mapping, index,
3488 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3494 /* TODO: cluster can be compressed due to race with .writepage */
3498 err = prepare_write_begin(sbi, page, pos, len,
3499 &blkaddr, &need_balance);
3503 if (need_balance && !IS_NOQUOTA(inode) &&
3504 has_not_enough_free_secs(sbi, 0, 0)) {
3506 f2fs_balance_fs(sbi, true);
3508 if (page->mapping != mapping) {
3509 /* The page got truncated from under us */
3510 f2fs_put_page(page, 1);
3515 f2fs_wait_on_page_writeback(page, DATA, false, true);
3517 if (len == PAGE_SIZE || PageUptodate(page))
3520 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3521 !f2fs_verity_in_progress(inode)) {
3522 zero_user_segment(page, len, PAGE_SIZE);
3526 if (blkaddr == NEW_ADDR) {
3527 zero_user_segment(page, 0, PAGE_SIZE);
3528 SetPageUptodate(page);
3530 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3531 DATA_GENERIC_ENHANCE_READ)) {
3532 err = -EFSCORRUPTED;
3535 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3540 if (unlikely(page->mapping != mapping)) {
3541 f2fs_put_page(page, 1);
3544 if (unlikely(!PageUptodate(page))) {
3552 f2fs_put_page(page, 1);
3553 f2fs_write_failed(mapping, pos + len);
3555 f2fs_drop_inmem_pages_all(sbi, false);
3559 static int f2fs_write_end(struct file *file,
3560 struct address_space *mapping,
3561 loff_t pos, unsigned len, unsigned copied,
3562 struct page *page, void *fsdata)
3564 struct inode *inode = page->mapping->host;
3566 trace_f2fs_write_end(inode, pos, len, copied);
3569 * This should only happen when len == PAGE_SIZE, so we expect copied
3570 * to be PAGE_SIZE as well. Otherwise, we treat it as zero copied and
3571 * let generic_perform_write() try to copy the data again via copied=0.
3573 if (!PageUptodate(page)) {
3574 if (unlikely(copied != len))
3577 SetPageUptodate(page);
3580 #ifdef CONFIG_F2FS_FS_COMPRESSION
3581 /* overwrite compressed file */
3582 if (f2fs_compressed_file(inode) && fsdata) {
3583 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3584 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3586 if (pos + copied > i_size_read(inode) &&
3587 !f2fs_verity_in_progress(inode))
3588 f2fs_i_size_write(inode, pos + copied);
3596 set_page_dirty(page);
3598 if (pos + copied > i_size_read(inode) &&
3599 !f2fs_verity_in_progress(inode))
3600 f2fs_i_size_write(inode, pos + copied);
3602 f2fs_put_page(page, 1);
3603 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
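/*
 * Validate direct-I/O alignment: the offset and iovec alignment must be a
 * multiple of the filesystem block size, with the bdev's logical block
 * size checked as a fallback before the request is rejected.
 */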
3607 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3610 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3611 unsigned blkbits = i_blkbits;
3612 unsigned blocksize_mask = (1 << blkbits) - 1;
3613 unsigned long align = offset | iov_iter_alignment(iter);
3614 struct block_device *bdev = inode->i_sb->s_bdev;
3616 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3619 if (align & blocksize_mask) {
3621 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3622 blocksize_mask = (1 << blkbits) - 1;
3623 if (align & blocksize_mask)
3630 static void f2fs_dio_end_io(struct bio *bio)
3632 struct f2fs_private_dio *dio = bio->bi_private;
3634 dec_page_count(F2FS_I_SB(dio->inode),
3635 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3637 bio->bi_private = dio->orig_private;
3638 bio->bi_end_io = dio->orig_end_io;
3645 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3648 struct f2fs_private_dio *dio;
3649 bool write = (bio_op(bio) == REQ_OP_WRITE);
3651 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3652 sizeof(struct f2fs_private_dio), GFP_NOFS);
3657 dio->orig_end_io = bio->bi_end_io;
3658 dio->orig_private = bio->bi_private;
3661 bio->bi_end_io = f2fs_dio_end_io;
3662 bio->bi_private = dio;
3664 inc_page_count(F2FS_I_SB(inode),
3665 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3670 bio->bi_status = BLK_STS_IOERR;
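/*
 * Direct I/O entry point.  i_gc_rwsem[rw] (plus i_gc_rwsem[READ] for
 * out-of-place DIO writes) is held around __blockdev_direct_IO(); with
 * IOCB_NOWAIT only trylock is attempted.  The write hint is cleared while
 * whint_mode is off and restored afterwards.
 */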
3674 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3676 struct address_space *mapping = iocb->ki_filp->f_mapping;
3677 struct inode *inode = mapping->host;
3678 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3679 struct f2fs_inode_info *fi = F2FS_I(inode);
3680 size_t count = iov_iter_count(iter);
3681 loff_t offset = iocb->ki_pos;
3682 int rw = iov_iter_rw(iter);
3684 enum rw_hint hint = iocb->ki_hint;
3685 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3688 err = check_direct_IO(inode, iter, offset);
3690 return err < 0 ? err : 0;
3692 if (f2fs_force_buffered_io(inode, iocb, iter))
3695 do_opu = allow_outplace_dio(inode, iocb, iter);
3697 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3699 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3700 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3702 if (iocb->ki_flags & IOCB_NOWAIT) {
3703 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3704 iocb->ki_hint = hint;
3708 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3709 up_read(&fi->i_gc_rwsem[rw]);
3710 iocb->ki_hint = hint;
3715 down_read(&fi->i_gc_rwsem[rw]);
3717 down_read(&fi->i_gc_rwsem[READ]);
3720 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3721 iter, rw == WRITE ? get_data_block_dio_write :
3722 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3723 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3727 up_read(&fi->i_gc_rwsem[READ]);
3729 up_read(&fi->i_gc_rwsem[rw]);
3732 if (whint_mode == WHINT_MODE_OFF)
3733 iocb->ki_hint = hint;
3735 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3738 set_inode_flag(inode, FI_UPDATE_WRITE);
3739 } else if (err == -EIOCBQUEUED) {
3740 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3741 count - iov_iter_count(iter));
3742 } else if (err < 0) {
3743 f2fs_write_failed(mapping, offset + count);
3747 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3748 else if (err == -EIOCBQUEUED)
3749 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3750 count - iov_iter_count(iter));
3754 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
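/*
 * Page invalidation: drop the dirty accounting for meta/node/data pages,
 * hand atomic-written pages back to the inmem machinery, and clear any
 * private page state.
 */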
3759 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3760 unsigned int length)
3762 struct inode *inode = page->mapping->host;
3763 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3765 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3766 (offset % PAGE_SIZE || length != PAGE_SIZE))
3769 if (PageDirty(page)) {
3770 if (inode->i_ino == F2FS_META_INO(sbi)) {
3771 dec_page_count(sbi, F2FS_DIRTY_META);
3772 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3773 dec_page_count(sbi, F2FS_DIRTY_NODES);
3775 inode_dec_dirty_pages(inode);
3776 f2fs_remove_dirty_inode(inode);
3780 clear_cold_data(page);
3782 if (IS_ATOMIC_WRITTEN_PAGE(page))
3783 return f2fs_drop_inmem_page(inode, page);
3785 f2fs_clear_page_private(page);
3788 int f2fs_release_page(struct page *page, gfp_t wait)
3790 /* If this is a dirty page, keep PagePrivate */
3791 if (PageDirty(page))
3794 /* This is an atomic written page, keep Private */
3795 if (IS_ATOMIC_WRITTEN_PAGE(page))
3798 clear_cold_data(page);
3799 f2fs_clear_page_private(page);
3803 static int f2fs_set_data_page_dirty(struct page *page)
3805 struct inode *inode = page_file_mapping(page)->host;
3807 trace_f2fs_set_page_dirty(page, DATA);
3809 if (!PageUptodate(page))
3810 SetPageUptodate(page);
3811 if (PageSwapCache(page))
3812 return __set_page_dirty_nobuffers(page);
3814 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3815 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3816 f2fs_register_inmem_page(inode, page);
3820 * Previously, this page has been registered, we just
3826 if (!PageDirty(page)) {
3827 __set_page_dirty_nobuffers(page);
3828 f2fs_update_dirty_page(inode, page);
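/*
 * For compressed inodes, bmap resolves a block via the dnode at the start
 * of its cluster; only clusters not stored compressed (data_blkaddr !=
 * COMPRESS_ADDR) yield a meaningful physical block number.
 */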
3835 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3837 #ifdef CONFIG_F2FS_FS_COMPRESSION
3838 struct dnode_of_data dn;
3839 sector_t start_idx, blknr = 0;
3842 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3844 set_new_dnode(&dn, inode, NULL, NULL, 0);
3845 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3849 if (dn.data_blkaddr != COMPRESS_ADDR) {
3850 dn.ofs_in_node += block - start_idx;
3851 blknr = f2fs_data_blkaddr(&dn);
3852 if (!__is_valid_data_blkaddr(blknr))
3856 f2fs_put_dnode(&dn);
3864 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3866 struct inode *inode = mapping->host;
3867 struct buffer_head tmp = {
3868 .b_size = i_blocksize(inode),
3872 if (f2fs_has_inline_data(inode))
3875 /* make sure whole blocks are allocated */
3876 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3877 filemap_write_and_wait(mapping);
3879 /* Block number less than F2FS MAX BLOCKS */
3880 if (unlikely(block >= F2FS_I_SB(inode)->max_file_blocks))
3883 if (f2fs_compressed_file(inode)) {
3884 blknr = f2fs_bmap_compress(inode, block);
3886 if (!get_data_block_bmap(inode, block, &tmp, 0))
3887 blknr = tmp.b_blocknr;
3890 trace_f2fs_bmap(inode, block, blknr);
3894 #ifdef CONFIG_MIGRATION
3895 #include <linux/migrate.h>
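/*
 * Page migration: an atomic-written page holds one extra reference and
 * sits on fi->inmem_pages, so its list entry must be repointed to the new
 * page under inmem_lock before migration completes.
 */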
3897 int f2fs_migrate_page(struct address_space *mapping,
3898 struct page *newpage, struct page *page, enum migrate_mode mode)
3900 int rc, extra_count;
3901 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3902 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3904 BUG_ON(PageWriteback(page));
3906 /* migrating an atomic written page is safe with the inmem_lock held */
3907 if (atomic_written) {
3908 if (mode != MIGRATE_SYNC)
3910 if (!mutex_trylock(&fi->inmem_lock))
3914 /* one extra reference was held for atomic_write page */
3915 extra_count = atomic_written ? 1 : 0;
3916 rc = migrate_page_move_mapping(mapping, newpage,
3918 if (rc != MIGRATEPAGE_SUCCESS) {
3920 mutex_unlock(&fi->inmem_lock);
3924 if (atomic_written) {
3925 struct inmem_pages *cur;
3926 list_for_each_entry(cur, &fi->inmem_pages, list)
3927 if (cur->page == page) {
3928 cur->page = newpage;
3931 mutex_unlock(&fi->inmem_lock);
3936 if (PagePrivate(page)) {
3937 f2fs_set_page_private(newpage, page_private(page));
3938 f2fs_clear_page_private(page);
3941 if (mode != MIGRATE_SYNC_NO_COPY)
3942 migrate_page_copy(newpage, page);
3944 migrate_page_states(newpage, page);
3946 return MIGRATEPAGE_SUCCESS;
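/*
 * Swapfile activation, fast path for PAGE_SIZE == F2FS_BLKSIZE: walk the
 * file with get_data_block() and add each physically contiguous run as a
 * swap extent; any unmapped hole aborts activation.
 */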
3951 static int check_swap_activate_fast(struct swap_info_struct *sis,
3952 struct file *swap_file, sector_t *span)
3954 struct address_space *mapping = swap_file->f_mapping;
3955 struct inode *inode = mapping->host;
3956 sector_t cur_lblock;
3957 sector_t last_lblock;
3959 sector_t lowest_pblock = -1;
3960 sector_t highest_pblock = 0;
3962 unsigned long nr_pblocks;
3967 * Map all the blocks into the extent list. This code doesn't try
3971 last_lblock = logical_to_blk(inode, i_size_read(inode));
3972 len = i_size_read(inode);
3974 while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
3975 struct buffer_head map_bh;
3980 memset(&map_bh, 0, sizeof(struct buffer_head));
3981 map_bh.b_size = len - cur_lblock;
3983 ret = get_data_block(inode, cur_lblock, &map_bh, 0,
3984 F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
3989 if (!buffer_mapped(&map_bh))
3992 pblock = map_bh.b_blocknr;
3993 nr_pblocks = logical_to_blk(inode, map_bh.b_size);
3995 if (cur_lblock + nr_pblocks >= sis->max)
3996 nr_pblocks = sis->max - cur_lblock;
3998 if (cur_lblock) { /* exclude the header page */
3999 if (pblock < lowest_pblock)
4000 lowest_pblock = pblock;
4001 if (pblock + nr_pblocks - 1 > highest_pblock)
4002 highest_pblock = pblock + nr_pblocks - 1;
4006 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4008 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4012 cur_lblock += nr_pblocks;
4015 *span = 1 + highest_pblock - lowest_pblock;
4016 if (cur_lblock == 0)
4017 cur_lblock = 1; /* force Empty message */
4018 sis->max = cur_lblock;
4019 sis->pages = cur_lblock - 1;
4020 sis->highest_bit = cur_lblock - 1;
4024 pr_err("swapon: swapfile has holes\n");
4028 /* Copied from generic_swapfile_activate() to check for any holes */
4029 static int check_swap_activate(struct swap_info_struct *sis,
4030 struct file *swap_file, sector_t *span)
4032 struct address_space *mapping = swap_file->f_mapping;
4033 struct inode *inode = mapping->host;
4034 unsigned blocks_per_page;
4035 unsigned long page_no;
4037 sector_t probe_block;
4038 sector_t last_block;
4039 sector_t lowest_block = -1;
4040 sector_t highest_block = 0;
4044 if (PAGE_SIZE == F2FS_BLKSIZE)
4045 return check_swap_activate_fast(sis, swap_file, span);
4047 blkbits = inode->i_blkbits;
4048 blocks_per_page = PAGE_SIZE >> blkbits;
4051 * Map all the blocks into the extent list. This code doesn't try
4056 last_block = i_size_read(inode) >> blkbits;
4057 while ((probe_block + blocks_per_page) <= last_block &&
4058 page_no < sis->max) {
4059 unsigned block_in_page;
4060 sector_t first_block;
4066 block = probe_block;
4067 err = bmap(inode, &block);
4070 first_block = block;
4073 * It must be PAGE_SIZE aligned on-disk
4075 if (first_block & (blocks_per_page - 1)) {
4080 for (block_in_page = 1; block_in_page < blocks_per_page;
4083 block = probe_block + block_in_page;
4084 err = bmap(inode, &block);
4089 if (block != first_block + block_in_page) {
4096 first_block >>= (PAGE_SHIFT - blkbits);
4097 if (page_no) { /* exclude the header page */
4098 if (first_block < lowest_block)
4099 lowest_block = first_block;
4100 if (first_block > highest_block)
4101 highest_block = first_block;
4105 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4107 ret = add_swap_extent(sis, page_no, 1, first_block);
4112 probe_block += blocks_per_page;
4117 *span = 1 + highest_block - lowest_block;
4119 page_no = 1; /* force Empty message */
4121 sis->pages = page_no - 1;
4122 sis->highest_bit = page_no - 1;
4126 pr_err("swapon: swapfile has holes\n");
4130 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4133 struct inode *inode = file_inode(file);
4136 if (!S_ISREG(inode->i_mode))
4139 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4142 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4143 f2fs_err(F2FS_I_SB(inode),
4144 "Swapfile not supported in LFS mode");
4148 ret = f2fs_convert_inline_inode(inode);
4152 if (!f2fs_disable_compressed_file(inode))
4155 ret = check_swap_activate(sis, file, span);
4159 set_inode_flag(inode, FI_PIN_FILE);
4160 f2fs_precache_extents(inode);
4161 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4165 static void f2fs_swap_deactivate(struct file *file)
4167 struct inode *inode = file_inode(file);
4169 clear_inode_flag(inode, FI_PIN_FILE);
4172 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4178 static void f2fs_swap_deactivate(struct file *file)
4183 const struct address_space_operations f2fs_dblock_aops = {
4184 .readpage = f2fs_read_data_page,
4185 .readahead = f2fs_readahead,
4186 .writepage = f2fs_write_data_page,
4187 .writepages = f2fs_write_data_pages,
4188 .write_begin = f2fs_write_begin,
4189 .write_end = f2fs_write_end,
4190 .set_page_dirty = f2fs_set_data_page_dirty,
4191 .invalidatepage = f2fs_invalidate_page,
4192 .releasepage = f2fs_release_page,
4193 .direct_IO = f2fs_direct_IO,
4195 .swap_activate = f2fs_swap_activate,
4196 .swap_deactivate = f2fs_swap_deactivate,
4197 #ifdef CONFIG_MIGRATION
4198 .migratepage = f2fs_migrate_page,
4202 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4204 struct address_space *mapping = page_mapping(page);
4205 unsigned long flags;
4207 xa_lock_irqsave(&mapping->i_pages, flags);
4208 __xa_clear_mark(&mapping->i_pages, page_index(page),
4209 PAGECACHE_TAG_DIRTY);
4210 xa_unlock_irqrestore(&mapping->i_pages, flags);
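/*
 * Post-read contexts come from a slab cache backed by a mempool of
 * NUM_PREALLOC_POST_READ_CTXS entries, so decryption/verity/decompression
 * completion work can make progress even under memory pressure.
 */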
4213 int __init f2fs_init_post_read_processing(void)
4215 bio_post_read_ctx_cache =
4216 kmem_cache_create("f2fs_bio_post_read_ctx",
4217 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4218 if (!bio_post_read_ctx_cache)
4220 bio_post_read_ctx_pool =
4221 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4222 bio_post_read_ctx_cache);
4223 if (!bio_post_read_ctx_pool)
4224 goto fail_free_cache;
4228 kmem_cache_destroy(bio_post_read_ctx_cache);
4233 void f2fs_destroy_post_read_processing(void)
4235 mempool_destroy(bio_post_read_ctx_pool);
4236 kmem_cache_destroy(bio_post_read_ctx_cache);
4239 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4241 if (!f2fs_sb_has_encrypt(sbi) &&
4242 !f2fs_sb_has_verity(sbi) &&
4243 !f2fs_sb_has_compression(sbi))
4246 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4247 WQ_UNBOUND | WQ_HIGHPRI,
4249 if (!sbi->post_read_wq)
4254 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4256 if (sbi->post_read_wq)
4257 destroy_workqueue(sbi->post_read_wq);
4260 int __init f2fs_init_bio_entry_cache(void)
4262 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4263 sizeof(struct bio_entry));
4264 if (!bio_entry_slab)
4269 void f2fs_destroy_bio_entry_cache(void)
4271 kmem_cache_destroy(bio_entry_slab);