GNU Linux-libre 5.10.219-gnu1
fs/f2fs/compress.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

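/*
 * Page-pointer arrays are normally cluster_size entries and come from a
 * per-superblock slab sized for exactly that; larger requests fall back
 * to the regular kernel heap. Both paths return zeroed memory, and
 * page_array_free() below picks the matching release path by size.
 */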
static void *page_array_alloc(struct inode *inode, int nr)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int size = sizeof(struct page *) * nr;

        if (likely(size <= sbi->page_array_slab_size))
                return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
        return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int size = sizeof(struct page *) * nr;

        if (!pages)
                return;

        if (likely(size <= sbi->page_array_slab_size))
                kmem_cache_free(sbi->page_array_slab, pages);
        else
                kfree(pages);
}

struct f2fs_compress_ops {
        int (*init_compress_ctx)(struct compress_ctx *cc);
        void (*destroy_compress_ctx)(struct compress_ctx *cc);
        int (*compress_pages)(struct compress_ctx *cc);
        int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
        void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
        int (*decompress_pages)(struct decompress_io_ctx *dic);
};

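/*
 * Cluster geometry helpers. cluster_size is always a power of two, so
 * the mask in offset_in_cluster() is exact. A worked example with the
 * default log_cluster_size of 2 (4-page clusters): page index 9 lives
 * in cluster 9 >> 2 == 2, at offset 9 & 3 == 1, and that cluster's
 * first page index is 2 << 2 == 8.
 */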
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
        return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
        return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
        return cc->cluster_idx << cc->log_cluster_size;
}

bool f2fs_is_compressed_page(struct page *page)
{
        if (!PagePrivate(page))
                return false;
        if (!page_private(page))
                return false;
        if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
                return false;
        /*
         * page->private may be set with pid.
         * pid_max is enough to check if it is traced.
         */
        if (IS_IO_TRACED_PAGE(page))
                return false;

        f2fs_bug_on(F2FS_M_SB(page->mapping),
                *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
        return true;
}

static void f2fs_set_compressed_page(struct page *page,
                struct inode *inode, pgoff_t index, void *data)
{
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)data);

        /* i_crypto_info and iv index */
        page->index = index;
        page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
        int i;

        for (i = 0; i < len; i++) {
                if (!cc->rpages[i])
                        continue;
                if (unlock)
                        unlock_page(cc->rpages[i]);
                else
                        put_page(cc->rpages[i]);
        }
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
        f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
        f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
                struct writeback_control *wbc, bool redirty, int unlock)
{
        unsigned int i;

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;
                if (redirty)
                        redirty_page_for_writepage(wbc, cc->rpages[i]);
                f2fs_put_page(cc->rpages[i], unlock);
        }
}

struct page *f2fs_compress_control_page(struct page *page)
{
        return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
        if (cc->rpages)
                return 0;

        cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
        return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
        page_array_free(cc->inode, cc->rpages, cc->cluster_size);
        cc->rpages = NULL;
        cc->nr_rpages = 0;
        cc->nr_cpages = 0;
        if (!reuse)
                cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
        unsigned int cluster_ofs;

        if (!f2fs_cluster_can_merge_page(cc, page->index))
                f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

        cluster_ofs = offset_in_cluster(cc, page->index);
        cc->rpages[cluster_ofs] = page;
        cc->nr_rpages++;
        cc->cluster_idx = cluster_idx(cc, page->index);
}

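/*
 * LZO backend. cc->private holds the lzo1x work memory; cc->clen is
 * primed with lzo1x_worst_compress(), an upper bound on how much lzo1x
 * can expand incompressible input, so the output buffer that
 * f2fs_compress_pages() later sizes from clen cannot be overrun.
 */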
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
                                LZO1X_MEM_COMPRESS, GFP_NOFS);
        if (!cc->private)
                return -ENOMEM;

        cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
        return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
        int ret;

        ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
                                        &cc->clen, cc->private);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
                return -EIO;
        }
        return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
        int ret;

        ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
                                                dic->rbuf, &dic->rlen);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
                return -EIO;
        }

        if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
                printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
                                        "expected:%lu\n", KERN_ERR,
                                        F2FS_I_SB(dic->inode)->sb->s_id,
                                        dic->rlen,
                                        PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
        .init_compress_ctx      = lzo_init_compress_ctx,
        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
        .compress_pages         = lzo_compress_pages,
        .decompress_pages       = lzo_decompress_pages,
};
#endif

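/*
 * LZ4 backend. Unlike LZO, the output budget is capped at one page less
 * than the raw cluster (minus the header) rather than at
 * LZ4_compressBound(). As a rough worked example on 4KiB pages with the
 * default 4-page cluster: rlen is 16KiB, so clen tops out just under
 * 12KiB, and a cluster is stored compressed only if that saves at least
 * one full page. LZ4_compress_default() returning 0 means the budget
 * was exceeded, which becomes -EAGAIN and a raw (uncompressed) write.
 */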
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
        cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
                                LZ4_MEM_COMPRESS, GFP_NOFS);
        if (!cc->private)
                return -ENOMEM;

        /*
         * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
         * the worst compress case, because the lz4 compressor handles the
         * output budget properly and simply fails when the destination is
         * too small.
         */
        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
        int len;

        len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
                                                cc->clen, cc->private);
        if (!len)
                return -EAGAIN;

        cc->clen = len;
        return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
        int ret;

        ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
                                                dic->clen, dic->rlen);
        if (ret < 0) {
                printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
                return -EIO;
        }

        if (ret != PAGE_SIZE << dic->log_cluster_size) {
                printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
                                        "expected:%lu\n", KERN_ERR,
                                        F2FS_I_SB(dic->inode)->sb->s_id, ret,
                                        PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
        .init_compress_ctx      = lz4_init_compress_ctx,
        .destroy_compress_ctx   = lz4_destroy_compress_ctx,
        .compress_pages         = lz4_compress_pages,
        .decompress_pages       = lz4_decompress_pages,
};
#endif

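/*
 * ZSTD backend, built on the kernel's streaming interface. The whole
 * workspace is bounded up front (ZSTD_CStreamWorkspaceBound() for
 * writes, ZSTD_DStreamWorkspaceBound() for reads) so the codec never
 * allocates internally. The same one-page-saved output budget as LZ4
 * applies; see zstd_compress_pages() for how ZSTD_endStream() reports
 * an overflow of that budget.
 */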
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL        1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
        ZSTD_parameters params;
        ZSTD_CStream *stream;
        void *workspace;
        unsigned int workspace_size;

        params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
        workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

        workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
                                        workspace_size, GFP_NOFS);
        if (!workspace)
                return -ENOMEM;

        stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
        if (!stream) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__);
                kvfree(workspace);
                return -EIO;
        }

        cc->private = workspace;
        cc->private2 = stream;

        cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
        kvfree(cc->private);
        cc->private = NULL;
        cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
        ZSTD_CStream *stream = cc->private2;
        ZSTD_inBuffer inbuf;
        ZSTD_outBuffer outbuf;
        int src_size = cc->rlen;
        int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
        int ret;

        inbuf.pos = 0;
        inbuf.src = cc->rbuf;
        inbuf.size = src_size;

        outbuf.pos = 0;
        outbuf.dst = cc->cbuf->cdata;
        outbuf.size = dst_size;

        ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
        if (ZSTD_isError(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__, ZSTD_getErrorCode(ret));
                return -EIO;
        }

        ret = ZSTD_endStream(stream, &outbuf);
        if (ZSTD_isError(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
                                __func__, ZSTD_getErrorCode(ret));
                return -EIO;
        }

        /*
         * a non-zero return means compressed data still remains in the
         * intermediate buffer because there was no more space in
         * cbuf->cdata
         */
        if (ret)
                return -EAGAIN;

        cc->clen = outbuf.pos;
        return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
        ZSTD_DStream *stream;
        void *workspace;
        unsigned int workspace_size;
        unsigned int max_window_size =
                        MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

        workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

        workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
                                        workspace_size, GFP_NOFS);
        if (!workspace)
                return -ENOMEM;

        stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
        if (!stream) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__);
                kvfree(workspace);
                return -EIO;
        }

        dic->private = workspace;
        dic->private2 = stream;

        return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
        kvfree(dic->private);
        dic->private = NULL;
        dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
        ZSTD_DStream *stream = dic->private2;
        ZSTD_inBuffer inbuf;
        ZSTD_outBuffer outbuf;
        int ret;

        inbuf.pos = 0;
        inbuf.src = dic->cbuf->cdata;
        inbuf.size = dic->clen;

        outbuf.pos = 0;
        outbuf.dst = dic->rbuf;
        outbuf.size = dic->rlen;

        ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
        if (ZSTD_isError(ret)) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
                                KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__, ZSTD_getErrorCode(ret));
                return -EIO;
        }

        if (dic->rlen != outbuf.pos) {
                printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
                                "expected:%lu\n", KERN_ERR,
                                F2FS_I_SB(dic->inode)->sb->s_id,
                                __func__, dic->rlen,
                                PAGE_SIZE << dic->log_cluster_size);
                return -EIO;
        }

        return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
        .init_compress_ctx      = zstd_init_compress_ctx,
        .destroy_compress_ctx   = zstd_destroy_compress_ctx,
        .compress_pages         = zstd_compress_pages,
        .init_decompress_ctx    = zstd_init_decompress_ctx,
        .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
        .decompress_pages       = zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
        int ret;

        ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
                                        &cc->clen, cc->private);
        if (ret != LZO_E_OK) {
                printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
                                KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
                return -EIO;
        }
        return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
        .init_compress_ctx      = lzo_init_compress_ctx,
        .destroy_compress_ctx   = lzo_destroy_compress_ctx,
        .compress_pages         = lzorle_compress_pages,
        .decompress_pages       = lzo_decompress_pages,
};
#endif
#endif

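/*
 * Table indexed by fi->i_compress_algorithm (COMPRESS_LZO, COMPRESS_LZ4,
 * COMPRESS_ZSTD, COMPRESS_LZORLE, in that order); a NULL slot means the
 * algorithm was compiled out, which f2fs_is_compress_backend_ready()
 * reports to callers.
 */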
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
        &f2fs_lzo_ops,
#else
        NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
        &f2fs_lz4_ops,
#else
        NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
        &f2fs_zstd_ops,
#else
        NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
        &f2fs_lzorle_ops,
#else
        NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
        if (!f2fs_compressed_file(inode))
                return true;
        return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
                "Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
        compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
        if (!compress_page_pool)
                return -ENOMEM;

        return 0;
}

void f2fs_destroy_compress_mempool(void)
{
        mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
        struct page *page;

        page = mempool_alloc(compress_page_pool, GFP_NOFS);
        lock_page(page);

        return page;
}

static void f2fs_compress_free_page(struct page *page)
{
        if (!page)
                return;
        set_page_private(page, (unsigned long)NULL);
        ClearPagePrivate(page);
        page->mapping = NULL;
        unlock_page(page);
        mempool_free(page, compress_page_pool);
}

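/*
 * Map an array of pages into one contiguous kernel virtual buffer.
 * vm_map_ram() can fail transiently when the vmap area is fragmented,
 * so retry a few times, flushing stale aliases in between to reclaim
 * address space.
 */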
#define MAX_VMAP_RETRIES        3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
        int i;
        void *buf = NULL;

        for (i = 0; i < MAX_VMAP_RETRIES; i++) {
                buf = vm_map_ram(pages, count, -1);
                if (buf)
                        break;
                vm_unmap_aliases();
        }
        return buf;
}

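/*
 * Compress one cluster: map rpages and cpages into linear buffers, run
 * the per-algorithm compressor, fill in the header (clen, optional
 * checksum, reserved words), then trim the cpages array down to the
 * pages the compressed data actually occupies. Returns -EAGAIN when
 * compression would not save at least one page, telling the caller to
 * write the cluster back uncompressed instead.
 */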
static int f2fs_compress_pages(struct compress_ctx *cc)
{
        struct f2fs_inode_info *fi = F2FS_I(cc->inode);
        const struct f2fs_compress_ops *cops =
                                f2fs_cops[fi->i_compress_algorithm];
        unsigned int max_len, new_nr_cpages;
        struct page **new_cpages;
        u32 chksum = 0;
        int i, ret;

        trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
                                cc->cluster_size, fi->i_compress_algorithm);

        if (cops->init_compress_ctx) {
                ret = cops->init_compress_ctx(cc);
                if (ret)
                        goto out;
        }

        max_len = COMPRESS_HEADER_SIZE + cc->clen;
        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
        if (!cc->cpages) {
                ret = -ENOMEM;
                goto destroy_compress_ctx;
        }

        for (i = 0; i < cc->nr_cpages; i++) {
                cc->cpages[i] = f2fs_compress_alloc_page();
                if (!cc->cpages[i]) {
                        ret = -ENOMEM;
                        goto out_free_cpages;
                }
        }

        cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
        if (!cc->rbuf) {
                ret = -ENOMEM;
                goto out_free_cpages;
        }

        cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
        if (!cc->cbuf) {
                ret = -ENOMEM;
                goto out_vunmap_rbuf;
        }

        ret = cops->compress_pages(cc);
        if (ret)
                goto out_vunmap_cbuf;

        max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

        if (cc->clen > max_len) {
                ret = -EAGAIN;
                goto out_vunmap_cbuf;
        }

        cc->cbuf->clen = cpu_to_le32(cc->clen);

        if (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))
                chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
                                        cc->cbuf->cdata, cc->clen);
        cc->cbuf->chksum = cpu_to_le32(chksum);

        for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
                cc->cbuf->reserved[i] = cpu_to_le32(0);

        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

        /* Now we're going to cut unnecessary tail pages */
        new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
        if (!new_cpages) {
                ret = -ENOMEM;
                goto out_vunmap_cbuf;
        }

        /* zero out any unused part of the last page */
        memset(&cc->cbuf->cdata[cc->clen], 0,
                        (new_nr_cpages * PAGE_SIZE) -
                        (cc->clen + COMPRESS_HEADER_SIZE));

        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
        vm_unmap_ram(cc->rbuf, cc->cluster_size);

        for (i = 0; i < cc->nr_cpages; i++) {
                if (i < new_nr_cpages) {
                        new_cpages[i] = cc->cpages[i];
                        continue;
                }
                f2fs_compress_free_page(cc->cpages[i]);
                cc->cpages[i] = NULL;
        }

        if (cops->destroy_compress_ctx)
                cops->destroy_compress_ctx(cc);

        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = new_cpages;
        cc->nr_cpages = new_nr_cpages;

        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
                                                        cc->clen, ret);
        return 0;

out_vunmap_cbuf:
        vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
        vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
        for (i = 0; i < cc->nr_cpages; i++) {
                if (cc->cpages[i])
                        f2fs_compress_free_page(cc->cpages[i]);
        }
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
destroy_compress_ctx:
        if (cops->destroy_compress_ctx)
                cops->destroy_compress_ctx(cc);
out:
        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
                                                        cc->clen, ret);
        return ret;
}

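/*
 * Per-page read completion for a compressed cluster. Every compressed
 * page that finishes I/O drops dic->pending_pages; the last one to
 * arrive does the real work: allocate tpages to stand in for holes in
 * rpages, vmap both sides, decompress, and verify the optional
 * checksum. With verity enabled, end_io handling and freeing the dic
 * are deferred to the verity work instead of happening here.
 */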
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
        struct decompress_io_ctx *dic =
                        (struct decompress_io_ctx *)page_private(page);
        struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
        struct f2fs_inode_info *fi = F2FS_I(dic->inode);
        const struct f2fs_compress_ops *cops =
                        f2fs_cops[fi->i_compress_algorithm];
        int ret;
        int i;

        dec_page_count(sbi, F2FS_RD_DATA);

        if (bio->bi_status || PageError(page))
                dic->failed = true;

        if (atomic_dec_return(&dic->pending_pages))
                return;

        trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
                                dic->cluster_size, fi->i_compress_algorithm);

        /* submit partial compressed pages */
        if (dic->failed) {
                ret = -EIO;
                goto out_free_dic;
        }

        dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
        if (!dic->tpages) {
                ret = -ENOMEM;
                goto out_free_dic;
        }

        for (i = 0; i < dic->cluster_size; i++) {
                if (dic->rpages[i]) {
                        dic->tpages[i] = dic->rpages[i];
                        continue;
                }

                dic->tpages[i] = f2fs_compress_alloc_page();
                if (!dic->tpages[i]) {
                        ret = -ENOMEM;
                        goto out_free_dic;
                }
        }

        if (cops->init_decompress_ctx) {
                ret = cops->init_decompress_ctx(dic);
                if (ret)
                        goto out_free_dic;
        }

        dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
        if (!dic->rbuf) {
                ret = -ENOMEM;
                goto destroy_decompress_ctx;
        }

        dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
        if (!dic->cbuf) {
                ret = -ENOMEM;
                goto out_vunmap_rbuf;
        }

        dic->clen = le32_to_cpu(dic->cbuf->clen);
        dic->rlen = PAGE_SIZE << dic->log_cluster_size;

        if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
                ret = -EFSCORRUPTED;
                goto out_vunmap_cbuf;
        }

        ret = cops->decompress_pages(dic);

        if (!ret && (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))) {
                u32 provided = le32_to_cpu(dic->cbuf->chksum);
                u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

                if (provided != calculated) {
                        if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
                                set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
                                printk_ratelimited(
                                        "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x\n",
                                        KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
                                        provided, calculated);
                        }
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                }
        }

out_vunmap_cbuf:
        vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
        vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
        if (cops->destroy_decompress_ctx)
                cops->destroy_decompress_ctx(dic);
out_free_dic:
        if (!verity)
                f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
                                                                ret, false);

        trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
                                                        dic->clen, ret);
        if (!verity)
                f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
        if (cc->cluster_idx == NULL_CLUSTER)
                return true;
        return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
        return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
        return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
        if (f2fs_cluster_is_empty(cc))
                return true;
        return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
        loff_t i_size = i_size_read(cc->inode);
        unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
        int i;

        for (i = 0; i < cc->cluster_size; i++) {
                struct page *page = cc->rpages[i];

                f2fs_bug_on(sbi, !page);

                if (unlikely(f2fs_cp_error(sbi)))
                        return false;
                if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                        return false;

                /* beyond EOF */
                if (page->index >= nr_pages)
                        return false;
        }
        return true;
}

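/*
 * Count blocks in the cluster @cluster_idx. Returns a negative errno on
 * failure, 0 if the cluster is not compressed (or its dnode is absent),
 * and otherwise 1 + the number of valid blocks behind the COMPRESS_ADDR
 * header: compressed blocks when @compr is true, non-NULL blocks when
 * it is false.
 */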
static int __f2fs_cluster_blocks(struct inode *inode,
                                unsigned int cluster_idx, bool compr)
{
        struct dnode_of_data dn;
        unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
        unsigned int start_idx = cluster_idx <<
                                F2FS_I(inode)->i_log_cluster_size;
        int ret;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
        if (ret) {
                if (ret == -ENOENT)
                        ret = 0;
                goto fail;
        }

        if (dn.data_blkaddr == COMPRESS_ADDR) {
                int i;

                ret = 1;
                for (i = 1; i < cluster_size; i++) {
                        block_t blkaddr;

                        blkaddr = data_blkaddr(dn.inode,
                                        dn.node_page, dn.ofs_in_node + i);
                        if (compr) {
                                if (__is_valid_data_blkaddr(blkaddr))
                                        ret++;
                        } else {
                                if (blkaddr != NULL_ADDR)
                                        ret++;
                        }
                }

                f2fs_bug_on(F2FS_I_SB(inode),
                        !compr && ret != cluster_size &&
                        !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
        }
fail:
        f2fs_put_dnode(&dn);
        return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
        return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
        return __f2fs_cluster_blocks(inode,
                index >> F2FS_I(inode)->i_log_cluster_size,
                false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
        if (!f2fs_need_compress_data(cc->inode))
                return false;
        if (f2fs_is_atomic_file(cc->inode))
                return false;
        if (f2fs_is_mmap_file(cc->inode))
                return false;
        if (!f2fs_cluster_is_full(cc))
                return false;
        if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
                return false;
        return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
        int i;

        for (i = 0; i < cc->cluster_size; i++) {
                if (cc->rpages[i])
                        set_page_writeback(cc->rpages[i]);
        }
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
        int i;

        for (i = 0; i < cc->cluster_size; i++)
                if (cc->rpages[i])
                        set_page_dirty(cc->rpages[i]);
}

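/*
 * Pin and read in every page of a compressed cluster ahead of a partial
 * overwrite. Pages that are not already uptodate go through
 * f2fs_read_multi_pages(); afterwards each page is re-looked-up under
 * lock, and the whole sequence restarts if a page was truncated or is
 * still not uptodate. On success the locked rpages array is handed to
 * write_begin/write_end through *fsdata.
 */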
static int prepare_compress_overwrite(struct compress_ctx *cc,
                struct page **pagep, pgoff_t index, void **fsdata)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
        struct address_space *mapping = cc->inode->i_mapping;
        struct page *page;
        sector_t last_block_in_bio;
        unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
        pgoff_t start_idx = start_idx_of_cluster(cc);
        int i, ret;

retry:
        ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
        if (ret <= 0)
                return ret;

        ret = f2fs_init_compress_ctx(cc);
        if (ret)
                return ret;

        /* keep page reference to avoid page reclaim */
        for (i = 0; i < cc->cluster_size; i++) {
                page = f2fs_pagecache_get_page(mapping, start_idx + i,
                                                        fgp_flag, GFP_NOFS);
                if (!page) {
                        ret = -ENOMEM;
                        goto unlock_pages;
                }

                if (PageUptodate(page))
                        f2fs_put_page(page, 1);
                else
                        f2fs_compress_ctx_add_page(cc, page);
        }

        if (!f2fs_cluster_is_empty(cc)) {
                struct bio *bio = NULL;

                ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
                                        &last_block_in_bio, false, true);
                f2fs_put_rpages(cc);
                f2fs_destroy_compress_ctx(cc, true);
                if (ret)
                        goto out;
                if (bio)
                        f2fs_submit_bio(sbi, bio, DATA);

                ret = f2fs_init_compress_ctx(cc);
                if (ret)
                        goto out;
        }

        for (i = 0; i < cc->cluster_size; i++) {
                f2fs_bug_on(sbi, cc->rpages[i]);

                page = find_lock_page(mapping, start_idx + i);
                if (!page) {
                        /* page can be truncated */
                        goto release_and_retry;
                }

                f2fs_wait_on_page_writeback(page, DATA, true, true);
                f2fs_compress_ctx_add_page(cc, page);

                if (!PageUptodate(page)) {
release_and_retry:
                        f2fs_put_rpages(cc);
                        f2fs_unlock_rpages(cc, i + 1);
                        f2fs_destroy_compress_ctx(cc, true);
                        goto retry;
                }
        }

        if (likely(!ret)) {
                *fsdata = cc->rpages;
                *pagep = cc->rpages[offset_in_cluster(cc, index)];
                return cc->cluster_size;
        }

unlock_pages:
        f2fs_put_rpages(cc);
        f2fs_unlock_rpages(cc, i);
        f2fs_destroy_compress_ctx(cc, true);
out:
        return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
                struct page **pagep, pgoff_t index, void **fsdata)
{
        struct compress_ctx cc = {
                .inode = inode,
                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
                .cluster_size = F2FS_I(inode)->i_cluster_size,
                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
                .rpages = NULL,
                .nr_rpages = 0,
        };

        return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
                                        pgoff_t index, unsigned copied)
{
        struct compress_ctx cc = {
                .inode = inode,
                .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
                .cluster_size = F2FS_I(inode)->i_cluster_size,
                .rpages = fsdata,
        };
        bool first_index = (index == cc.rpages[0]->index);

        if (copied)
                set_cluster_dirty(&cc);

        f2fs_put_rpages_wbc(&cc, NULL, false, 1);
        f2fs_destroy_compress_ctx(&cc, false);

        return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
        void *fsdata = NULL;
        struct page *pagep;
        int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
        pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
                                                        log_cluster_size;
        int err;

        err = f2fs_is_compressed_cluster(inode, start_idx);
        if (err < 0)
                return err;

        /* truncate normal cluster */
        if (!err)
                return f2fs_do_truncate_blocks(inode, from, lock);

        /* truncate compressed cluster */
        err = f2fs_prepare_compress_overwrite(inode, &pagep,
                                                start_idx, &fsdata);

        /* should not be a normal cluster */
        f2fs_bug_on(F2FS_I_SB(inode), err == 0);

        if (err <= 0)
                return err;

        if (err > 0) {
                struct page **rpages = fsdata;
                int cluster_size = F2FS_I(inode)->i_cluster_size;
                int i;

                for (i = cluster_size - 1; i >= 0; i--) {
                        loff_t start = rpages[i]->index << PAGE_SHIFT;

                        if (from <= start) {
                                zero_user_segment(rpages[i], 0, PAGE_SIZE);
                        } else {
                                zero_user_segment(rpages[i], from - start,
                                                                PAGE_SIZE);
                                break;
                        }
                }

                f2fs_compress_write_end(inode, fsdata, start_idx, true);
        }
        return 0;
}

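/*
 * Write back one successfully compressed cluster. On disk the cluster
 * still owns cluster_size block slots: slot 0 becomes the COMPRESS_ADDR
 * header, the next nr_cpages slots receive the compressed pages, and
 * any remaining slots are released back to NEW_ADDR. Every error path
 * returns -EAGAIN so that the caller retries the cluster as raw pages.
 */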
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
                                        int *submitted,
                                        struct writeback_control *wbc,
                                        enum iostat_type io_type)
{
        struct inode *inode = cc->inode;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = cc->inode->i_ino,
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = wbc_to_write_flags(wbc),
                .old_blkaddr = NEW_ADDR,
                .page = NULL,
                .encrypted_page = NULL,
                .compressed_page = NULL,
                .submitted = false,
                .io_type = io_type,
                .io_wbc = wbc,
                .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
        };
        struct dnode_of_data dn;
        struct node_info ni;
        struct compress_io_ctx *cic;
        pgoff_t start_idx = start_idx_of_cluster(cc);
        unsigned int last_index = cc->cluster_size - 1;
        loff_t psize;
        int i, err;

        /* bypass data pages on checkpoint error so the kworker jobs can proceed */
        if (unlikely(f2fs_cp_error(sbi))) {
                mapping_set_error(cc->rpages[0]->mapping, -EIO);
                goto out_free;
        }

        if (IS_NOQUOTA(inode)) {
                /*
                 * We need to wait for node_write to avoid block allocation during
                 * checkpoint. This can only happen to quota writes which can cause
                 * the below discard race condition.
                 */
                down_read(&sbi->node_write);
        } else if (!f2fs_trylock_op(sbi)) {
                goto out_free;
        }

        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

        err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
        if (err)
                goto out_unlock_op;

        for (i = 0; i < cc->cluster_size; i++) {
                if (data_blkaddr(dn.inode, dn.node_page,
                                        dn.ofs_in_node + i) == NULL_ADDR)
                        goto out_put_dnode;
        }

        psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
        if (err)
                goto out_put_dnode;

        fio.version = ni.version;

        cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
        if (!cic)
                goto out_put_dnode;

        cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
        cic->inode = inode;
        atomic_set(&cic->pending_pages, cc->nr_cpages);
        cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
        if (!cic->rpages)
                goto out_put_cic;

        cic->nr_rpages = cc->cluster_size;

        for (i = 0; i < cc->nr_cpages; i++) {
                f2fs_set_compressed_page(cc->cpages[i], inode,
                                        cc->rpages[i + 1]->index, cic);
                fio.compressed_page = cc->cpages[i];

                fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
                                                dn.ofs_in_node + i + 1);

                /* wait for GCed page writeback via META_MAPPING */
                f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

                if (fio.encrypted) {
                        fio.page = cc->rpages[i + 1];
                        err = f2fs_encrypt_one_page(&fio);
                        if (err)
                                goto out_destroy_crypt;
                        cc->cpages[i] = fio.encrypted_page;
                }
        }

        set_cluster_writeback(cc);

        for (i = 0; i < cc->cluster_size; i++)
                cic->rpages[i] = cc->rpages[i];

        for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
                block_t blkaddr;

                blkaddr = f2fs_data_blkaddr(&dn);
                fio.page = cc->rpages[i];
                fio.old_blkaddr = blkaddr;

                /* cluster header */
                if (i == 0) {
                        if (blkaddr == COMPRESS_ADDR)
                                fio.compr_blocks++;
                        if (__is_valid_data_blkaddr(blkaddr))
                                f2fs_invalidate_blocks(sbi, blkaddr);
                        f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
                        goto unlock_continue;
                }

                if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
                        fio.compr_blocks++;

                if (i > cc->nr_cpages) {
                        if (__is_valid_data_blkaddr(blkaddr)) {
                                f2fs_invalidate_blocks(sbi, blkaddr);
                                f2fs_update_data_blkaddr(&dn, NEW_ADDR);
                        }
                        goto unlock_continue;
                }

                f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

                if (fio.encrypted)
                        fio.encrypted_page = cc->cpages[i - 1];
                else
                        fio.compressed_page = cc->cpages[i - 1];

                cc->cpages[i - 1] = NULL;
                f2fs_outplace_write_data(&dn, &fio);
                (*submitted)++;
unlock_continue:
                inode_dec_dirty_pages(cc->inode);
                unlock_page(fio.page);
        }

        if (fio.compr_blocks)
                f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
        f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

        set_inode_flag(cc->inode, FI_APPEND_WRITE);
        if (cc->cluster_idx == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

        f2fs_put_dnode(&dn);
        if (IS_NOQUOTA(inode))
                up_read(&sbi->node_write);
        else
                f2fs_unlock_op(sbi);

        spin_lock(&fi->i_size_lock);
        if (fi->last_disk_size < psize)
                fi->last_disk_size = psize;
        spin_unlock(&fi->i_size_lock);

        f2fs_put_rpages(cc);
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
        f2fs_destroy_compress_ctx(cc, false);
        return 0;

out_destroy_crypt:
        page_array_free(cc->inode, cic->rpages, cc->cluster_size);

        for (--i; i >= 0; i--)
                fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
        kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
        f2fs_put_dnode(&dn);
out_unlock_op:
        if (IS_NOQUOTA(inode))
                up_read(&sbi->node_write);
        else
                f2fs_unlock_op(sbi);
out_free:
        for (i = 0; i < cc->nr_cpages; i++) {
                if (!cc->cpages[i])
                        continue;
                f2fs_compress_free_page(cc->cpages[i]);
                cc->cpages[i] = NULL;
        }
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
        return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct compress_io_ctx *cic =
                        (struct compress_io_ctx *)page_private(page);
        int i;

        if (unlikely(bio->bi_status))
                mapping_set_error(cic->inode->i_mapping, -EIO);

        f2fs_compress_free_page(page);

        dec_page_count(sbi, F2FS_WB_DATA);

        if (atomic_dec_return(&cic->pending_pages))
                return;

        for (i = 0; i < cic->nr_rpages; i++) {
                WARN_ON(!cic->rpages[i]);
                clear_cold_data(cic->rpages[i]);
                end_page_writeback(cic->rpages[i]);
        }

        page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
        kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
                                        int *submitted,
                                        struct writeback_control *wbc,
                                        enum iostat_type io_type)
{
        struct address_space *mapping = cc->inode->i_mapping;
        int _submitted, compr_blocks, ret, i;

        compr_blocks = f2fs_compressed_blocks(cc);

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;

                redirty_page_for_writepage(wbc, cc->rpages[i]);
                unlock_page(cc->rpages[i]);
        }

        if (compr_blocks < 0)
                return compr_blocks;

        for (i = 0; i < cc->cluster_size; i++) {
                if (!cc->rpages[i])
                        continue;
retry_write:
                lock_page(cc->rpages[i]);

                if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
                        unlock_page(cc->rpages[i]);
                        continue;
                }

                if (!PageDirty(cc->rpages[i]))
                        goto continue_unlock;

                if (PageWriteback(cc->rpages[i])) {
                        if (wbc->sync_mode == WB_SYNC_NONE)
                                goto continue_unlock;
                        f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
                }

                if (!clear_page_dirty_for_io(cc->rpages[i]))
                        goto continue_unlock;

                ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
                                                NULL, NULL, wbc, io_type,
                                                compr_blocks, false);
                if (ret) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(cc->rpages[i]);
                                ret = 0;
                        } else if (ret == -EAGAIN) {
                                /*
                                 * for quota file, just redirty left pages to
                                 * avoid deadlock caused by cluster update race
                                 * from foreground operation.
                                 */
                                if (IS_NOQUOTA(cc->inode))
                                        return 0;
                                ret = 0;
                                cond_resched();
                                congestion_wait(BLK_RW_ASYNC,
                                                DEFAULT_IO_TIMEOUT);
                                goto retry_write;
                        }
                        return ret;
                }

                *submitted += _submitted;
        }

        f2fs_balance_fs(F2FS_M_SB(mapping), true);

        return 0;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
                                        int *submitted,
                                        struct writeback_control *wbc,
                                        enum iostat_type io_type)
{
        int err;

        *submitted = 0;
        if (cluster_may_compress(cc)) {
                err = f2fs_compress_pages(cc);
                if (err == -EAGAIN) {
                        goto write;
                } else if (err) {
                        f2fs_put_rpages_wbc(cc, wbc, true, 1);
                        goto destroy_out;
                }

                err = f2fs_write_compressed_pages(cc, submitted,
                                                        wbc, io_type);
                if (!err)
                        return 0;
                f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
        }
write:
        f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

        err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
        f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
        f2fs_destroy_compress_ctx(cc, false);
        return err;
}

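/*
 * Build the decompress context for a cluster read: copy the caller's
 * rpages, then allocate one page per compressed block, tagging each
 * with the context pointer so the read end_io can find it. The cpage
 * indices are offset by one to mirror the on-disk layout, where block 0
 * of the cluster is the COMPRESS_ADDR header.
 */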
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
        struct decompress_io_ctx *dic;
        pgoff_t start_idx = start_idx_of_cluster(cc);
        int i;

        dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
        if (!dic)
                return ERR_PTR(-ENOMEM);

        dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
        if (!dic->rpages) {
                kmem_cache_free(dic_entry_slab, dic);
                return ERR_PTR(-ENOMEM);
        }

        dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
        dic->inode = cc->inode;
        atomic_set(&dic->pending_pages, cc->nr_cpages);
        dic->cluster_idx = cc->cluster_idx;
        dic->cluster_size = cc->cluster_size;
        dic->log_cluster_size = cc->log_cluster_size;
        dic->nr_cpages = cc->nr_cpages;
        dic->failed = false;

        for (i = 0; i < dic->cluster_size; i++)
                dic->rpages[i] = cc->rpages[i];
        dic->nr_rpages = cc->cluster_size;

        dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
        if (!dic->cpages)
                goto out_free;

        for (i = 0; i < dic->nr_cpages; i++) {
                struct page *page;

                page = f2fs_compress_alloc_page();
                if (!page)
                        goto out_free;

                f2fs_set_compressed_page(page, cc->inode,
                                        start_idx + i + 1, dic);
                dic->cpages[i] = page;
        }

        return dic;

out_free:
        f2fs_free_dic(dic);
        return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
        int i;

        if (dic->tpages) {
                for (i = 0; i < dic->cluster_size; i++) {
                        if (dic->rpages[i])
                                continue;
                        if (!dic->tpages[i])
                                continue;
                        f2fs_compress_free_page(dic->tpages[i]);
                }
                page_array_free(dic->inode, dic->tpages, dic->cluster_size);
        }

        if (dic->cpages) {
                for (i = 0; i < dic->nr_cpages; i++) {
                        if (!dic->cpages[i])
                                continue;
                        f2fs_compress_free_page(dic->cpages[i]);
                }
                page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
        }

        page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
        kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
                        unsigned int cluster_size, bool err, bool verity)
{
        int i;

        for (i = 0; i < cluster_size; i++) {
                struct page *rpage = rpages[i];

                if (!rpage)
                        continue;

                if (err || PageError(rpage))
                        goto clear_uptodate;

                if (!verity || fsverity_verify_page(rpage)) {
                        SetPageUptodate(rpage);
                        goto unlock;
                }
clear_uptodate:
                ClearPageUptodate(rpage);
                ClearPageError(rpage);
unlock:
                unlock_page(rpage);
        }
}

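/*
 * One page-array slab per mount, sized for a full cluster's worth of
 * page pointers and named after the backing device. The 35-byte name
 * buffer fits the worst case, assuming the usual dev_t split of a
 * 12-bit major and 20-bit minor: 22 prefix characters, up to 4 major
 * digits, a colon, up to 7 minor digits, and the trailing NUL.
 */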
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        char slab_name[35];

        sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

        sbi->page_array_slab_size = sizeof(struct page *) <<
                                        F2FS_OPTION(sbi).compress_log_size;

        sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
                                        sbi->page_array_slab_size);
        if (!sbi->page_array_slab)
                return -ENOMEM;
        return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
        kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
        cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
                                        sizeof(struct compress_io_ctx));
        if (!cic_entry_slab)
                return -ENOMEM;
        return 0;
}

static void f2fs_destroy_cic_cache(void)
{
        kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
        dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
                                        sizeof(struct decompress_io_ctx));
        if (!dic_entry_slab)
                return -ENOMEM;
        return 0;
}

static void f2fs_destroy_dic_cache(void)
{
        kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
        int err;

        err = f2fs_init_cic_cache();
        if (err)
                goto out;
        err = f2fs_init_dic_cache();
        if (err)
                goto free_cic;
        return 0;
free_cic:
        f2fs_destroy_cic_cache();
out:
        return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
        f2fs_destroy_dic_cache();
        f2fs_destroy_cic_cache();
}