// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

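/*
 * Page-pointer arrays are allocated constantly on the (de)compression
 * paths, so cluster-sized requests (the common case) are served from a
 * per-superblock slab (page_array_slab); anything larger falls back to
 * f2fs_kzalloc(). page_array_free() below mirrors the same size split.
 */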
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

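/*
 * Per-algorithm backend vtable. The context init/destroy hooks are
 * optional (LZO and LZ4, for instance, provide no decompress context);
 * callers check each hook for NULL before invoking it.
 */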
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

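/*
 * Cluster geometry helpers: cluster_size is always a power of two
 * (1 << log_cluster_size), so offsets and indexes reduce to masks and
 * shifts.
 */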
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

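/*
 * LZO backend: cc->private holds the LZO1X_MEM_COMPRESS-byte scratch
 * area, and cc->clen is primed with the worst-case output size so
 * lzo1x_1_compress() cannot overrun the output buffer.
 */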
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzo_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst case, because the lz4 compressor enforces its own
	 * output budget.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx = lz4_init_compress_ctx,
	.destroy_compress_ctx = lz4_destroy_compress_ctx,
	.compress_pages = lz4_compress_pages,
	.decompress_pages = lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * If compressed data remains in the intermediate buffer because
	 * there is no more space in cbuf->cdata, treat the cluster as
	 * incompressible.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					__func__, dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx = zstd_init_compress_ctx,
	.destroy_compress_ctx = zstd_destroy_compress_ctx,
	.compress_pages = zstd_compress_pages,
	.init_decompress_ctx = zstd_init_decompress_ctx,
	.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
	.decompress_pages = zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzorle_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif
#endif

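/*
 * Algorithm table indexed by the COMPRESS_* value kept in
 * i_compress_algorithm. A NULL slot means the backend is compiled out;
 * f2fs_is_compress_backend_ready() relies on that to reject files whose
 * algorithm is unavailable.
 */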
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

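/*
 * Intermediate pages holding compressed data come from a dedicated
 * mempool so that writeback can make forward progress under memory
 * pressure. With a reclaim-capable mask like GFP_NOFS, mempool_alloc()
 * effectively does not fail, though callers still check defensively.
 */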
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

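/*
 * Compress one cluster: rpages (raw) and cpages (compressed) are vmapped
 * into two contiguous buffers, the backend is run, then the header fields
 * (clen, chksum, reserved) are filled in and tail pages the compressed
 * data no longer needs are trimmed. Returns -EAGAIN when the cluster does
 * not shrink by at least one page, so the caller writes it raw instead.
 */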
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

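/*
 * Decompress one cluster at read completion time. dic->tpages is the
 * decompression target: it aliases the pagecache pages in dic->rpages
 * where they exist and borrows temporary mempool pages for the holes, so
 * the backend always sees a full cluster-sized buffer.
 */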
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, ret);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk). It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}

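/*
 * Validate the on-disk block-address layout of a cluster. The reason
 * strings use a [a|b|c|d] shorthand for per-block addresses: C is
 * COMPRESS_ADDR, N is NULL_ADDR/NEW_ADDR, V is a valid data block and
 * * is don't-care.
 */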
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
	int cluster_end = 0;
	int i;
	char *reason = "";

	if (!compressed)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}

		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}
		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}
	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
}

static int __f2fs_cluster_blocks(struct inode *inode,
		unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

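/*
 * write_begin() helper for overwriting part of a compressed cluster:
 * every page of the cluster is pinned and brought uptodate (reading and
 * decompressing the cluster when needed) before any byte may be modified,
 * since a partially valid cluster cannot be recompressed. Returns
 * cluster_size on success, 0 for an ordinary cluster, or a negative errno.
 */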
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

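/*
 * Write one cluster in compressed form: block 0 of the cluster is tagged
 * with the COMPRESS_ADDR magic, the next nr_cpages blocks receive the
 * compressed payload, and the remaining block addresses are released as
 * NEW_ADDR. Any failure returns -EAGAIN so the caller can fall back to
 * f2fs_write_raw_pages().
 */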
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* bypass data pages so that the kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen for quota writes,
		 * which could otherwise cause the discard race condition
		 * below.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

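/*
 * Fallback writeback path: write the cluster's pages uncompressed, one
 * page at a time. compr_blocks is handed down so that
 * f2fs_write_single_data_page() can account for a cluster that was
 * previously on disk in compressed form.
 */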
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret, i;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (PageWriteback(cc->rpages[i])) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * For quota files, just redirty the remaining
				 * pages to avoid a deadlock caused by a cluster
				 * update race with foreground operations.
				 */
				if (IS_NOQUOTA(cc->inode))
					return 0;
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			return ret;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

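/*
 * Decompression context (dic) lifecycle: f2fs_alloc_dic() returns a dic
 * holding one reference, with remaining_pages preset to nr_cpages; each
 * compressed page's read completion drops one count and the final drop
 * triggers f2fs_decompress_cluster(). The refcount itself is released
 * through f2fs_put_dic()/f2fs_free_dic() below.
 */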
static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
					false, F2FS_I_SB(cc->inode));
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference
 * to the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}

		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue. This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
{
	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}

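/*
 * COMPRESS_CACHE support: clusters decompressed on read keep their
 * on-disk (compressed) form in the pagecache of a dedicated inode,
 * indexed by block address, so a later read of the same cluster can skip
 * the device I/O and only redo the decompression.
 */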
const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}

bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}

int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}