// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

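/*
 * Worked example of the cluster geometry above; an illustrative sketch
 * only, not part of the upstream driver. With the default 4-page cluster
 * (cluster_size == 4, log_cluster_size == 2), page index 11 sits at
 * offset 3 of cluster 2, which starts at page index 8. The mask and
 * shift forms are interchangeable only because cluster_size is always a
 * power of two.
 */
static inline void f2fs_cluster_math_example(struct compress_ctx *cc)
{
	/* assumes cc->cluster_size == 4 && cc->log_cluster_size == 2 */
	WARN_ON(offset_in_cluster(cc, 11) != 3);	/* 11 & (4 - 1) */
	WARN_ON(cluster_idx(cc, 11) != 2);		/* 11 >> 2 */
	WARN_ON(cluster_idx(cc, 11) << cc->log_cluster_size != 8);
}
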
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	/*
	 * page->private may hold a pid set by IO tracing; comparing
	 * against pid_max is enough to tell a traced page apart.
	 */
	if (IS_IO_TRACED_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not raise cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst compress case, because the lz4 compressor enforces its
	 * output budget by itself and simply fails when it is exceeded.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

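/*
 * Illustration of the output budget above (assuming 4KB pages and a
 * 4-page cluster): rlen == 16384, so clen is capped at
 * 16384 - 4096 - COMPRESS_HEADER_SIZE. A compressed cluster therefore
 * saves at least one full page; if the data cannot shrink that far,
 * LZ4_compress_default() below returns 0 and f2fs falls back to writing
 * the cluster uncompressed.
 */
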
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A non-zero return from ZSTD_endStream() means compressed data
	 * remains in the intermediate buffer because there is no more
	 * space in cbuf->cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

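/*
 * Note on the -EAGAIN path above: cops->compress_pages() failing with
 * -EAGAIN is not an error. f2fs_compress_pages() propagates it, and
 * f2fs_write_multi_pages() then writes the cluster through
 * f2fs_write_raw_pages() instead, i.e. incompressible clusters quietly
 * degrade to normal, uncompressed writeback.
 */
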
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					__func__, dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

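/*
 * A minimal usage sketch for f2fs_vmap(), for illustration only (not a
 * real call site): a successful mapping must be paired with
 * vm_unmap_ram() using the same page count. vm_map_ram() can fail
 * transiently while lazily-freed vmap blocks are still held, which is
 * why the retry loop above calls vm_unmap_aliases() between attempts.
 */
static inline int f2fs_vmap_usage_example(struct page **pages,
						unsigned int count)
{
	void *buf = f2fs_vmap(pages, count);

	if (!buf)
		return -ENOMEM;
	/* ... access all pages through the contiguous buffer ... */
	vm_unmap_ram(buf, count);
	return 0;
}
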
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

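/*
 * Size bookkeeping in f2fs_compress_pages(), with assumed 4KB pages and a
 * 4-page cluster: the backend may use up to clen + COMPRESS_HEADER_SIZE
 * bytes, so nr_cpages starts at DIV_ROUND_UP(max_len, PAGE_SIZE) (3 pages
 * here). After compression, the cluster is only committed when
 * clen <= PAGE_SIZE * (cluster_size - 1) - COMPRESS_HEADER_SIZE, and
 * new_nr_cpages trims the unused tail, e.g. clen == 5000 keeps
 * DIV_ROUND_UP(5000 + COMPRESS_HEADER_SIZE, 4096) == 2 pages.
 */
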
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (atomic_dec_return(&dic->pending_pages))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

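/*
 * On-disk layout walked by __f2fs_cluster_blocks(), as an illustration:
 * a compressed cluster stores COMPRESS_ADDR in block address slot 0 as
 * the cluster header, then the compressed blocks, then NEW_ADDR
 * placeholders in any unused tail slots. For a 4-page cluster compressed
 * into two blocks:
 *
 *	[COMPRESS_ADDR][blkaddr][blkaddr][NEW_ADDR]
 *
 * In this reading of the code above, f2fs_compressed_blocks() returns 3
 * (the header slot plus two valid compressed blocks), while
 * f2fs_is_compressed_cluster() counts every non-NULL slot, placeholders
 * included, and returns 4 (== cluster_size).
 */
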
static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	*fsdata = cc->rpages;
	*pagep = cc->rpages[offset_in_cluster(cc, index)];
	return cc->cluster_size;

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

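/*
 * Worked example of the zeroing loop above (assuming 4KB pages and a
 * 4-page cluster): truncating to from == 0x2800 (10KB) within the first
 * cluster zeroes page 3 completely (its start offset 0x3000 is >= from)
 * and page 2 from intra-page offset 0x800 to the page end, then stops.
 * The dirtied pages are released through f2fs_compress_write_end(), and
 * the shortened cluster is re-compressed on the next writeback.
 */
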
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

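/*
 * Writeback completion pattern above, summarized: every in-flight
 * compressed page holds a reference in cic->pending_pages, and each bio
 * completion drops one. Only the final completion (atomic_dec_return()
 * reaching zero) ends writeback on all raw pages of the cluster and
 * frees the cic, so the raw pages stay under writeback until every
 * compressed block has hit disk.
 */
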
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret, i;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (PageWriteback(cc->rpages[i])) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode))
					return 0;
				ret = 0;
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			return ret;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->pending_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

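/*
 * How the three dic page arrays relate (a summary of the code above and
 * of f2fs_decompress_pages(), for illustration): rpages[i] is the
 * pagecache page at cluster offset i and may be NULL for a hole in the
 * read request; cpages[] hold the on-disk compressed blocks filled in by
 * bio; tpages[] (built later in f2fs_decompress_pages()) equals rpages[]
 * with mempool pages substituted for the NULLs, so the decompressor
 * always sees a fully populated destination through f2fs_vmap().
 */
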
void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;

	return 0;
}

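/*
 * Sizing example (illustrative, assuming 64-bit pointers): with a
 * compress_log_size of 2 the per-sb slab object holds 4 page pointers,
 * i.e. 32 bytes. page_array_alloc() serves any request up to
 * page_array_slab_size from this slab and falls back to f2fs_kzalloc()
 * for larger arrays, so the common cluster-sized rpages/cpages arrays
 * never hit the general-purpose allocator.
 */
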
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}