// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
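/*
 * Note for the two helpers above: sbi->page_array_slab_size is sized for one
 * cluster's worth of page pointers (see f2fs_init_page_array_cache() at the
 * bottom of this file), e.g. with the default 4-page cluster that is
 * sizeof(struct page *) << 2 = 32 bytes on 64-bit. Larger requests fall back
 * to f2fs_kzalloc()/kvfree().
 */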
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
	bool (*is_level_valid)(int level);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
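/*
 * For illustration: with the default 4-page cluster (log_cluster_size = 2,
 * cluster_size = 4), page index 11 maps to cluster_idx(cc, 11) = 11 >> 2 = 2,
 * offset_in_cluster(cc, 11) = 11 & 3 = 3, and that cluster starts at page
 * index start_idx_of_cluster() = 2 << 2 = 8.
 */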
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}
static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->valid_nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_level)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt the worst compress case, because the lz4 compressor can
	 * handle the output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}
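/*
 * Worked example for the output budget above: a 4-page (16KiB) cluster gets
 * clen = 16KiB - 4KiB - COMPRESS_HEADER_SIZE. A cluster is only stored
 * compressed when it saves at least one full block, so asking the compressor
 * for more output space than that would be pointless.
 */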
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static bool lz4_is_level_valid(int lvl)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
#else
	return lvl == 0;
#endif
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
	.is_level_valid		= lz4_is_level_valid,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	zstd_parameters params;
	zstd_cstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	/* Need to keep this for backward compatibility */
	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = zstd_get_params(level, cc->rlen);
	workspace_size = zstd_cstream_workspace_bound(&params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}
static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	zstd_cstream *stream = cc->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	ret = zstd_end_stream(stream, &outbuf);
	if (zstd_is_error(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	/*
	 * compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = zstd_dstream_workspace_bound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream = dic->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static bool zstd_is_level_valid(int lvl)
{
	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
}
static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
	.is_level_valid		= zstd_is_level_valid,
};
#endif
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};
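/*
 * The table above is indexed by F2FS_I(inode)->i_compress_algorithm, i.e. the
 * COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE enum in f2fs.h;
 * a NULL slot means the algorithm was compiled out, which
 * f2fs_is_compress_backend_ready() below reports to callers.
 */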
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

bool f2fs_is_compress_level_valid(int alg, int lvl)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[alg];

	if (cops->is_level_valid)
		return cops->is_level_valid(lvl);

	return lvl == 0;
}
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}
#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
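/*
 * Note on the retry loop above: vm_map_ram() can fail transiently when the
 * vmap area is fragmented; vm_unmap_aliases() flushes lazily-freed mappings
 * to reclaim address space, so a bounded number of retries usually succeeds.
 */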
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
	cc->valid_nr_cpages = cc->nr_cpages;

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages)
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->valid_nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
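/*
 * Resulting on-disk layout of a compressed cluster (see struct compress_data
 * in f2fs.h): a COMPRESS_HEADER_SIZE header holding clen, the checksum and
 * reserved words, followed by clen bytes of compressed data, zero-padded to
 * the end of the last compressed block.
 */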
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	bool bypass_callback = false;
	int ret;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	ret = f2fs_prepare_decomp_mem(dic, false);
	if (ret) {
		bypass_callback = true;
		goto out_release;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;

		/* Avoid f2fs_commit_super in irq context */
		if (!in_task)
			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
		else
			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
		goto out_release;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_release:
	f2fs_release_decomp_mem(dic, bypass_callback, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret, in_task);
}
/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr && in_task)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic, in_task);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{
	unsigned long pgidx = pages[index]->index;
	int i = uptodate ? 0 : 1;

	/*
	 * when uptodate is set to true, also check whether all pages in the
	 * cluster are uptodate.
	 */
	if (uptodate && (pgidx % cc->cluster_size))
		return false;

	if (nr_pages - index < cc->cluster_size)
		return false;

	for (; i < cc->cluster_size; i++) {
		if (pages[index + i]->index != pgidx + i)
			return false;
		if (uptodate && !PageUptodate(pages[index + i]))
			return false;
	}

	return true;
}
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
	int cluster_end = 0;
	int i;
	char *reason = "";

	if (!compressed)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}
		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}
		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}
	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
}
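/*
 * For reference, a layout this check accepts looks like [C|V|V|N]: the
 * COMPRESS_ADDR marker in slot 0 of the cluster, then zero or more valid
 * block addresses, then only NULL_ADDR/NEW_ADDR up to the end of the cluster.
 */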
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}
static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i]) {
			set_page_dirty(cc->rpages[i]);
			set_page_private_gcing(cc->rpages[i]);
		}
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
		pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
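/*
 * Worked example for the zeroing loop above: with 4KiB pages and a 4-page
 * cluster starting at file offset 0, truncating to from = 6KiB zeroes pages
 * 3 and 2 entirely, zeroes page 1 from offset 2KiB (from - start) to the
 * end of the page, then stops, leaving page 0 untouched.
 */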
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
									1 : 0,
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;
	bool quota_inode = IS_NOQUOTA(inode);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (quota_inode) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		f2fs_down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->valid_nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
	add_compr_block_stat(inode, cc->valid_nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);

	f2fs_put_dnode(&dn);
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	enum count_type type = WB_DATA_TYPE(page,
				f2fs_is_compressed_page(page));
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, type);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int submitted, compr_blocks, i;
	int ret = 0;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	/* overwrite compressed cluster w/ normal cluster */
	if (compr_blocks > 0)
		f2fs_lock_op(sbi);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (PageWriteback(cc->rpages[i])) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode))
					goto out;
				ret = 0;
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			goto out;
		}

		*submitted_p += submitted;
	}

out:
	if (compr_blocks > 0)
		f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, true);
	return ret;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
						bool pre_alloc)
{
	return pre_alloc ^ f2fs_low_mem_mode(sbi);
}
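/*
 * Truth table for the XOR above: in normal mode (!f2fs_low_mem_mode()), the
 * decompression buffers are allocated up front (pre_alloc == true) and freed
 * last; in low-memory mode the allocation is deferred until
 * f2fs_decompress_cluster() actually runs (pre_alloc == false), trading
 * latency for a smaller memory footprint.
 */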
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
	int i;

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return 0;

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages)
		return -ENOMEM;

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			return -ENOMEM;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf)
		return -ENOMEM;

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf)
		return -ENOMEM;

	if (cops->init_decompress_ctx)
		return cops->init_decompress_ctx(dic);

	return 0;
}
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return;

	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);

	if (dic->cbuf)
		vm_unmap_ram(dic->cbuf, dic->nr_cpages);

	if (dic->rbuf)
		vm_unmap_ram(dic->rbuf, dic->cluster_size);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	int i, ret;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page) {
			ret = -ENOMEM;
			goto out_free;
		}

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	ret = f2fs_prepare_decomp_mem(dic, true);
	if (ret)
		goto out_free;

	return dic;

out_free:
	f2fs_free_dic(dic, true);
	return ERR_PTR(ret);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{
	int i;

	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}
static void f2fs_late_free_dic(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, free_work);

	f2fs_free_dic(dic, false);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{
	if (refcount_dec_and_test(&dic->refcnt)) {
		if (in_task) {
			f2fs_free_dic(dic, false);
		} else {
			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
					&dic->free_work);
		}
	}
}
/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic, in_task);
}
static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false, true);
}
/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed, in_task);
	}
}
/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic, in_task);
}
/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
						unsigned int ofs_in_node)
{
	bool compressed = data_blkaddr(dn->inode, dn->node_page,
					ofs_in_node) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}
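/*
 * Illustrative example: for a compressed cluster whose addresses are
 * [COMPRESS_ADDR, 100, 101, 102], the loop sees first_blkaddr = 100 and the
 * function returns 3 contiguous data blocks; a non-adjacent address makes it
 * return 0, while a hole simply ends the count early.
 */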
const struct address_space_operations f2fs_compress_aops = {
	.release_folio = f2fs_release_folio,
	.invalidate_folio = f2fs_invalidate_folio,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = COMPRESS_MAPPING(sbi);
	struct folio_batch fbatch;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	folio_batch_init(&fbatch);

	do {
		unsigned int nr, i;

		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
		if (!nr)
			break;

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			if (folio->mapping != mapping) {
				folio_unlock(folio);
				continue;
			}

			if (ino != get_page_private_data(&folio->page)) {
				folio_unlock(folio);
				continue;
			}

			generic_error_remove_page(mapping, &folio->page);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	} while (index < end);
}
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[35];

	if (!f2fs_sb_has_compression(sbi))
		return 0;

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}
static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}