GNU Linux-libre 6.8.9-gnu
fs/erofs/zdata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>

#define Z_EROFS_PCLUSTER_MAX_PAGES      (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS            2

/*
 * leave a type here in case another tagged
 * pointer is introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_bvec {
        struct page *page;
        int offset;
        unsigned int end;
};

#define __Z_EROFS_BVSET(name, total) \
struct name { \
        /* point to the next page which contains the following bvecs */ \
        struct page *nextpage; \
        struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed / updated atomically by parallelized code.
 */
struct z_erofs_pcluster {
        struct erofs_workgroup obj;
        struct mutex lock;

        /* A: point to next chained pcluster or TAILs */
        z_erofs_next_pcluster_t next;

        /* L: the maximum decompression size of this round */
        unsigned int length;

        /* L: total number of bvecs */
        unsigned int vcnt;

        /* I: pcluster size (compressed size) in bytes */
        unsigned int pclustersize;

        /* I: page offset of start position of decompression */
        unsigned short pageofs_out;

        /* I: page offset of inline compressed data */
        unsigned short pageofs_in;

        union {
                /* L: inline a certain number of bvecs for bootstrap */
                struct z_erofs_bvset_inline bvset;

                /* I: can be used to free the pcluster by RCU. */
                struct rcu_head rcu;
        };

        /* I: compression algorithm format */
        unsigned char algorithmformat;

        /* L: whether partial decompression or not */
        bool partial;

        /* L: indicate several pageofs_outs or not */
        bool multibases;

        /* L: whether extra buffer allocations are best-effort */
        bool besteffort;

        /* A: compressed bvecs (can be cached or inplaced pages) */
        struct z_erofs_bvec compressed_bvecs[];
};

/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL           ((void *) 0x700 + POISON_POINTER_DELTA)
#define Z_EROFS_PCLUSTER_NIL            (NULL)

struct z_erofs_decompressqueue {
        struct super_block *sb;
        atomic_t pending_bios;
        z_erofs_next_pcluster_t head;

        union {
                struct completion done;
                struct work_struct work;
                struct kthread_work kthread_work;
        } u;
        bool eio, sync;
};

static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
        return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
        return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
}

/*
 * bit 30: I/O error occurred on this page
 * bit 0 - 29: remaining parts to complete this page
 */
#define Z_EROFS_PAGE_EIO                        (1 << 30)

static inline void z_erofs_onlinepage_init(struct page *page)
{
        union {
                atomic_t o;
                unsigned long v;
        } u = { .o = ATOMIC_INIT(1) };

        set_page_private(page, u.v);
        smp_wmb();
        SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_split(struct page *page)
{
        atomic_inc((atomic_t *)&page->private);
}

static void z_erofs_onlinepage_endio(struct page *page, int err)
{
        int orig, v;

        DBG_BUGON(!PagePrivate(page));

        do {
                orig = atomic_read((atomic_t *)&page->private);
                v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0);
        } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig);

        if (!(v & ~Z_EROFS_PAGE_EIO)) {
                set_page_private(page, 0);
                ClearPagePrivate(page);
                if (!(v & Z_EROFS_PAGE_EIO))
                        SetPageUptodate(page);
                unlock_page(page);
        }
}
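
/*
 * Illustrative walkthrough of the helpers above: for a page split across
 * three pclusters, _init() sets private to 1, each of the three _split()
 * calls adds one (now 4), the final _endio() in z_erofs_do_read_page()
 * drops the initial reference (now 3), and each pcluster completion drops
 * one more; the last drop sees v == 0 with no EIO bit, so the page is
 * marked up-to-date and unlocked.
 */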

#define Z_EROFS_ONSTACK_PAGES           32

/*
 * since pclustersize is variable for the big pcluster feature, introduce
 * slab pools for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
        struct kmem_cache *slab;
        unsigned int maxpages;
        char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
        _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
        _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};
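
/*
 * For example, a 5-page pcluster is served by the "erofs_pcluster-16"
 * slab above since 16 is the smallest maxpages that covers it.
 */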

struct z_erofs_bvec_iter {
        struct page *bvpage;
        struct z_erofs_bvset *bvset;
        unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
        if (iter->bvpage)
                kunmap_local(iter->bvset);
        return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
        unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
        /* have to access nextpage in advance, otherwise it will be unmapped */
        struct page *nextpage = iter->bvset->nextpage;
        struct page *oldpage;

        DBG_BUGON(!nextpage);
        oldpage = z_erofs_bvec_iter_end(iter);
        iter->bvpage = nextpage;
        iter->bvset = kmap_local_page(nextpage);
        iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
        iter->cur = 0;
        return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
                                    struct z_erofs_bvset_inline *bvset,
                                    unsigned int bootstrap_nr,
                                    unsigned int cur)
{
        *iter = (struct z_erofs_bvec_iter) {
                .nr = bootstrap_nr,
                .bvset = (struct z_erofs_bvset *)bvset,
        };

        while (cur > iter->nr) {
                cur -= iter->nr;
                z_erofs_bvset_flip(iter);
        }
        iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
                                struct z_erofs_bvec *bvec,
                                struct page **candidate_bvpage,
                                struct page **pagepool)
{
        if (iter->cur >= iter->nr) {
                struct page *nextpage = *candidate_bvpage;

                if (!nextpage) {
                        nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
                        if (!nextpage)
                                return -ENOMEM;
                        set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
                }
                DBG_BUGON(iter->bvset->nextpage);
                iter->bvset->nextpage = nextpage;
                z_erofs_bvset_flip(iter);

                iter->bvset->nextpage = NULL;
                *candidate_bvpage = NULL;
        }
        iter->bvset->bvec[iter->cur++] = *bvec;
        return 0;
}
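
/*
 * For instance, with Z_EROFS_INLINE_BVECS == 2, enqueueing a third bvec
 * overflows the inline bootstrap set: a short-lived page is chained via
 * ->nextpage and the iterator flips onto it before storing the bvec.
 */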

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
                                 struct z_erofs_bvec *bvec,
                                 struct page **old_bvpage)
{
        if (iter->cur == iter->nr)
                *old_bvpage = z_erofs_bvset_flip(iter);
        else
                *old_bvpage = NULL;
        *bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
                if (!pcluster_pool[i].slab)
                        continue;
                kmem_cache_destroy(pcluster_pool[i].slab);
                pcluster_pool[i].slab = NULL;
        }
}

static int z_erofs_create_pcluster_pool(void)
{
        struct z_erofs_pcluster_slab *pcs;
        struct z_erofs_pcluster *a;
        unsigned int size;

        for (pcs = pcluster_pool;
             pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
                size = struct_size(a, compressed_bvecs, pcs->maxpages);

                sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
                pcs->slab = kmem_cache_create(pcs->name, size, 0,
                                              SLAB_RECLAIM_ACCOUNT, NULL);
                if (pcs->slab)
                        continue;

                z_erofs_destroy_pcluster_pool();
                return -ENOMEM;
        }
        return 0;
}

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
{
        unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct z_erofs_pcluster_slab *pcs = pcluster_pool;

        for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
                struct z_erofs_pcluster *pcl;

                if (nrpages > pcs->maxpages)
                        continue;

                pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
                if (!pcl)
                        return ERR_PTR(-ENOMEM);
                pcl->pclustersize = size;
                return pcl;
        }
        return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        int i;

        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
                struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

                if (pclusterpages > pcs->maxpages)
                        continue;

                kmem_cache_free(pcs->slab, pcl);
                return;
        }
        DBG_BUGON(1);
}

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;

static void erofs_destroy_percpu_workers(void)
{
        struct kthread_worker *worker;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                worker = rcu_dereference_protected(
                                        z_erofs_pcpu_workers[cpu], 1);
                rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
                if (worker)
                        kthread_destroy_worker(worker);
        }
        kfree(z_erofs_pcpu_workers);
}

static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
        struct kthread_worker *worker =
                kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);

        if (IS_ERR(worker))
                return worker;
        if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
                sched_set_fifo_low(worker->task);
        return worker;
}

static int erofs_init_percpu_workers(void)
{
        struct kthread_worker *worker;
        unsigned int cpu;

        z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
                        sizeof(struct kthread_worker *), GFP_ATOMIC);
        if (!z_erofs_pcpu_workers)
                return -ENOMEM;

        for_each_online_cpu(cpu) {      /* could miss cpu{off,on}line? */
                worker = erofs_init_percpu_worker(cpu);
                if (!IS_ERR(worker))
                        rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
        }
        return 0;
}
#else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
#endif

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;

static int erofs_cpu_online(unsigned int cpu)
{
        struct kthread_worker *worker, *old;

        worker = erofs_init_percpu_worker(cpu);
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        spin_lock(&z_erofs_pcpu_worker_lock);
        old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
                        lockdep_is_held(&z_erofs_pcpu_worker_lock));
        if (!old)
                rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
        spin_unlock(&z_erofs_pcpu_worker_lock);
        if (old)
                kthread_destroy_worker(worker);
        return 0;
}

static int erofs_cpu_offline(unsigned int cpu)
{
        struct kthread_worker *worker;

        spin_lock(&z_erofs_pcpu_worker_lock);
        worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
                        lockdep_is_held(&z_erofs_pcpu_worker_lock));
        rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
        spin_unlock(&z_erofs_pcpu_worker_lock);

        synchronize_rcu();
        if (worker)
                kthread_destroy_worker(worker);
        return 0;
}

static int erofs_cpu_hotplug_init(void)
{
        int state;

        state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                        "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
        if (state < 0)
                return state;

        erofs_cpuhp_state = state;
        return 0;
}

static void erofs_cpu_hotplug_destroy(void)
{
        if (erofs_cpuhp_state)
                cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif

void z_erofs_exit_zip_subsystem(void)
{
        erofs_cpu_hotplug_destroy();
        erofs_destroy_percpu_workers();
        destroy_workqueue(z_erofs_workqueue);
        z_erofs_destroy_pcluster_pool();
}

int __init z_erofs_init_zip_subsystem(void)
{
        int err = z_erofs_create_pcluster_pool();

        if (err)
                goto out_error_pcluster_pool;

        z_erofs_workqueue = alloc_workqueue("erofs_worker",
                        WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
        if (!z_erofs_workqueue) {
                err = -ENOMEM;
                goto out_error_workqueue_init;
        }

        err = erofs_init_percpu_workers();
        if (err)
                goto out_error_pcpu_worker;

        err = erofs_cpu_hotplug_init();
        if (err < 0)
                goto out_error_cpuhp_init;
        return err;

out_error_cpuhp_init:
        erofs_destroy_percpu_workers();
out_error_pcpu_worker:
        destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init:
        z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool:
        return err;
}

enum z_erofs_pclustermode {
        Z_EROFS_PCLUSTER_INFLIGHT,
        /*
         * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
         * could be dispatched into the bypass queue later due to up-to-date
         * managed pages. All related online pages cannot be reused for
         * inplace I/O (or bvpage) since it can be directly decoded without
         * I/O submission.
         */
        Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
        /*
         * The pcluster was just linked to a decompression chain by us.  It can
         * also be linked with the remaining pclusters, which means if the
         * processing page is the tail page of a pcluster, this pcluster can
         * safely use the whole page (since the previous pcluster is within the
         * same chain) for in-place I/O, as illustrated below:
         *  ___________________________________________________
         * |  tail (partial) page  |    head (partial) page    |
         * |  (of the current pcl) |   (of the previous pcl)   |
         * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
         *
         * [  (*) the page above can be used as inplace I/O.   ]
         */
        Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_decompress_frontend {
        struct inode *const inode;
        struct erofs_map_blocks map;
        struct z_erofs_bvec_iter biter;

        struct page *pagepool;
        struct page *candidate_bvpage;
        struct z_erofs_pcluster *pcl;
        z_erofs_next_pcluster_t owned_head;
        enum z_erofs_pclustermode mode;

        erofs_off_t headoffset;

        /* a cursor used to pick up inplace I/O pages */
        unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
        .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
        .mode = Z_EROFS_PCLUSTER_FOLLOWED }
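
/*
 * Typical usage (cf. the read_folio/readahead paths later in this file):
 *	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 * so a fresh frontend starts its chain at Z_EROFS_PCLUSTER_TAIL in
 * FOLLOWED mode.
 */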

static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
        unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

        if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
                return false;

        if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
                return true;

        if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
            fe->map.m_la < fe->headoffset)
                return true;

        return false;
}
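
/*
 * E.g. under the default "readaround" strategy, extents in front of the
 * current read head (m_la < headoffset) are also cached, while the
 * weaker "readahead" strategy only caches incompletely mapped extents.
 */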

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{
        struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
        struct z_erofs_pcluster *pcl = fe->pcl;
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        bool shouldalloc = z_erofs_should_alloc_cache(fe);
        bool standalone = true;
        /*
         * optimistic allocation without direct reclaim, since in-place I/O
         * can be used under low memory instead.
         */
        gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
        unsigned int i;

        if (i_blocksize(fe->inode) != PAGE_SIZE ||
            fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
                return;

        for (i = 0; i < pclusterpages; ++i) {
                struct page *page, *newpage;
                void *t;        /* mark pages just found for debugging */

                /* Inaccurate check w/o locking to avoid unneeded lookups */
                if (READ_ONCE(pcl->compressed_bvecs[i].page))
                        continue;

                page = find_get_page(mc, pcl->obj.index + i);
                if (page) {
                        t = (void *)((unsigned long)page | 1);
                        newpage = NULL;
                } else {
                        /* I/O is needed, not possible to decompress directly */
                        standalone = false;
                        if (!shouldalloc)
                                continue;

                        /*
                         * Try cached I/O if allocation succeeds or fall back
                         * to in-place I/O instead to avoid any direct reclaim.
                         */
                        newpage = erofs_allocpage(&fe->pagepool, gfp);
                        if (!newpage)
                                continue;
                        set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
                        t = (void *)((unsigned long)newpage | 1);
                }
                spin_lock(&pcl->obj.lockref.lock);
                if (!pcl->compressed_bvecs[i].page) {
                        pcl->compressed_bvecs[i].page = t;
                        spin_unlock(&pcl->obj.lockref.lock);
                        continue;
                }
                spin_unlock(&pcl->obj.lockref.lock);

                if (page)
                        put_page(page);
                else if (newpage)
                        erofs_pagepool_add(&fe->pagepool, newpage);
        }

        /*
         * don't do inplace I/O if all compressed pages are available in
         * managed cache since it can be moved to the bypass queue instead.
         */
        if (standalone)
                fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
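
/*
 * Note that the low pointer bit set above ("| 1") tags freshly grabbed
 * or preallocated cache pages as "just found"; z_erofs_fill_bio_vec()
 * strips and checks this tag when picking the page up for I/O later.
 */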

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
                                       struct erofs_workgroup *grp)
{
        struct z_erofs_pcluster *const pcl =
                container_of(grp, struct z_erofs_pcluster, obj);
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        int i;

        DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
        /*
         * the workgroup's refcount is now frozen at 0, so there is no
         * need to worry about concurrent decompression users.
         */
        for (i = 0; i < pclusterpages; ++i) {
                struct page *page = pcl->compressed_bvecs[i].page;

                if (!page)
                        continue;

                /* block other users from reclaiming or migrating the page */
                if (!trylock_page(page))
                        return -EBUSY;

                if (!erofs_page_is_managed(sbi, page))
                        continue;

                /* barrier is implied in the following 'unlock_page' */
                WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                detach_page_private(page);
                unlock_page(page);
        }
        return 0;
}

static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
        struct z_erofs_pcluster *pcl = folio_get_private(folio);
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        bool ret;
        int i;

        if (!folio_test_private(folio))
                return true;

        ret = false;
        spin_lock(&pcl->obj.lockref.lock);
        if (pcl->obj.lockref.count > 0)
                goto out;

        DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
        for (i = 0; i < pclusterpages; ++i) {
                if (pcl->compressed_bvecs[i].page == &folio->page) {
                        WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                        ret = true;
                        break;
                }
        }
        if (ret)
                folio_detach_private(folio);
out:
        spin_unlock(&pcl->obj.lockref.lock);
        return ret;
}

/*
 * It will be called only on inode eviction. In case there are still some
 * decompression requests in progress, wait with rescheduling for a bit here.
 * An extra lock could be introduced instead but it seems unnecessary.
 */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
                                           size_t offset, size_t length)
{
        const size_t stop = length + offset;

        /* Check for potential overflow in debug mode */
        DBG_BUGON(stop > folio_size(folio) || stop < length);

        if (offset == 0 && stop == folio_size(folio))
                while (!z_erofs_cache_release_folio(folio, 0))
                        cond_resched();
}

static const struct address_space_operations z_erofs_cache_aops = {
        .release_folio = z_erofs_cache_release_folio,
        .invalidate_folio = z_erofs_cache_invalidate_folio,
};

int erofs_init_managed_cache(struct super_block *sb)
{
        struct inode *const inode = new_inode(sb);

        if (!inode)
                return -ENOMEM;

        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &z_erofs_cache_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
        EROFS_SB(sb)->managed_cache = inode;
        return 0;
}

/* callers must be with pcluster lock held */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
                               struct z_erofs_bvec *bvec, bool exclusive)
{
        struct z_erofs_pcluster *pcl = fe->pcl;
        int ret;

        if (exclusive) {
                /* give priority to in-place I/O so it uses file pages first */
                spin_lock(&pcl->obj.lockref.lock);
                while (fe->icur > 0) {
                        if (pcl->compressed_bvecs[--fe->icur].page)
                                continue;
                        pcl->compressed_bvecs[fe->icur] = *bvec;
                        spin_unlock(&pcl->obj.lockref.lock);
                        return 0;
                }
                spin_unlock(&pcl->obj.lockref.lock);

                /* otherwise, check if it can be used as a bvpage */
                if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
                    !fe->candidate_bvpage)
                        fe->candidate_bvpage = bvec->page;
        }
        ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
                                   &fe->pagepool);
        fe->pcl->vcnt += (ret >= 0);
        return ret;
}

static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
        struct z_erofs_pcluster *pcl = f->pcl;
        z_erofs_next_pcluster_t *owned_head = &f->owned_head;

        /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
        if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
                    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
                *owned_head = &pcl->next;
                /* so we can attach this pcluster to our submission chain. */
                f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
                return;
        }

        /* type 2, it belongs to an ongoing chain */
        f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}
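
/*
 * In other words, claiming atomically swings pcl->next from NIL to the
 * current owned_head; losing that race means another chain already owns
 * the pcluster, so it is only marked INFLIGHT here.
 */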
static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
        struct erofs_map_blocks *map = &fe->map;
        struct super_block *sb = fe->inode->i_sb;
        bool ztailpacking = map->m_flags & EROFS_MAP_META;
        struct z_erofs_pcluster *pcl;
        struct erofs_workgroup *grp;
        int err;

        if (!(map->m_flags & EROFS_MAP_ENCODED) ||
            (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }

        /* no available pcluster, let's allocate one */
        pcl = z_erofs_alloc_pcluster(map->m_plen);
        if (IS_ERR(pcl))
                return PTR_ERR(pcl);

        spin_lock_init(&pcl->obj.lockref.lock);
        pcl->obj.lockref.count = 1;     /* one ref for this request */
        pcl->algorithmformat = map->m_algorithmformat;
        pcl->length = 0;
        pcl->partial = true;

        /* new pclusters should be claimed as type 1, primary and followed */
        pcl->next = fe->owned_head;
        pcl->pageofs_out = map->m_la & ~PAGE_MASK;
        fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

        /*
         * lock all primary followed works before making them visible to
         * others; mutex_trylock *never* fails for a new pcluster.
         */
        mutex_init(&pcl->lock);
        DBG_BUGON(!mutex_trylock(&pcl->lock));

        if (ztailpacking) {
                pcl->obj.index = 0;     /* which indicates ztailpacking */
        } else {
                pcl->obj.index = erofs_blknr(sb, map->m_pa);

                grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
                if (IS_ERR(grp)) {
                        err = PTR_ERR(grp);
                        goto err_out;
                }

                if (grp != &pcl->obj) {
                        fe->pcl = container_of(grp,
                                        struct z_erofs_pcluster, obj);
                        err = -EEXIST;
                        goto err_out;
                }
        }
        fe->owned_head = &pcl->next;
        fe->pcl = pcl;
        return 0;

err_out:
        mutex_unlock(&pcl->lock);
        z_erofs_free_pcluster(pcl);
        return err;
}

static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
        struct erofs_map_blocks *map = &fe->map;
        struct super_block *sb = fe->inode->i_sb;
        erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
        struct erofs_workgroup *grp = NULL;
        int ret;

        DBG_BUGON(fe->pcl);

        /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);

        if (!(map->m_flags & EROFS_MAP_META)) {
                grp = erofs_find_workgroup(sb, blknr);
        } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }

        if (grp) {
                fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
                ret = -EEXIST;
        } else {
                ret = z_erofs_register_pcluster(fe);
        }

        if (ret == -EEXIST) {
                mutex_lock(&fe->pcl->lock);
                z_erofs_try_to_claim_pcluster(fe);
        } else if (ret) {
                return ret;
        }

        z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
                                Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
        if (!z_erofs_is_inline_pcluster(fe->pcl)) {
                /* bind cache first when cached decompression is preferred */
                z_erofs_bind_cache(fe);
        } else {
                void *mptr;

                mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
                if (IS_ERR(mptr)) {
                        ret = PTR_ERR(mptr);
                        erofs_err(sb, "failed to get inline data %d", ret);
                        return ret;
                }
                get_page(map->buf.page);
                WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
                fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
                fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
        }
        /* file-backed inplace I/O pages are traversed in reverse order */
        fe->icur = z_erofs_pclusterpages(fe->pcl);
        return 0;
}

/*
 * keep in mind that referenced pclusters will be freed
 * only after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
        z_erofs_free_pcluster(container_of(head,
                        struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
        struct z_erofs_pcluster *const pcl =
                container_of(grp, struct z_erofs_pcluster, obj);

        call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{
        struct z_erofs_pcluster *pcl = fe->pcl;

        if (!pcl)
                return;

        z_erofs_bvec_iter_end(&fe->biter);
        mutex_unlock(&pcl->lock);

        if (fe->candidate_bvpage)
                fe->candidate_bvpage = NULL;

        /*
         * if all pending pages are added, drop the pcluster reference
         * unless the pcluster is hosted by ourselves.
         */
        if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
                erofs_workgroup_put(&pcl->obj);

        fe->pcl = NULL;
}

static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
                        unsigned int cur, unsigned int end, erofs_off_t pos)
{
        struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        unsigned int cnt;
        u8 *src;

        if (!packed_inode)
                return -EFSCORRUPTED;

        buf.inode = packed_inode;
        for (; cur < end; cur += cnt, pos += cnt) {
                cnt = min_t(unsigned int, end - cur,
                            sb->s_blocksize - erofs_blkoff(sb, pos));
                src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
                if (IS_ERR(src)) {
                        erofs_put_metabuf(&buf);
                        return PTR_ERR(src);
                }
                memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
        }
        erofs_put_metabuf(&buf);
        return 0;
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                                struct page *page, bool ra)
{
        struct inode *const inode = fe->inode;
        struct erofs_map_blocks *const map = &fe->map;
        const loff_t offset = page_offset(page);
        const unsigned int bs = i_blocksize(inode);
        bool tight = true, exclusive;
        unsigned int cur, end, len, split;
        int err = 0;

        z_erofs_onlinepage_init(page);
        split = 0;
        end = PAGE_SIZE;
repeat:
        if (offset + end - 1 < map->m_la ||
            offset + end - 1 >= map->m_la + map->m_llen) {
                z_erofs_pcluster_end(fe);
                map->m_la = offset + end - 1;
                map->m_llen = 0;
                err = z_erofs_map_blocks_iter(inode, map, 0);
                if (err)
                        goto out;
        }

        cur = offset > map->m_la ? 0 : map->m_la - offset;
        /* bump split parts first to avoid several separate cases */
        ++split;

        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                zero_user_segment(page, cur, end);
                tight = false;
                goto next_part;
        }

        if (map->m_flags & EROFS_MAP_FRAGMENT) {
                erofs_off_t fpos = offset + cur - map->m_la;

                len = min_t(unsigned int, map->m_llen - fpos, end - cur);
                err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
                                EROFS_I(inode)->z_fragmentoff + fpos);
                if (err)
                        goto out;
                tight = false;
                goto next_part;
        }

        if (!fe->pcl) {
                err = z_erofs_pcluster_begin(fe);
                if (err)
                        goto out;
                fe->pcl->besteffort |= !ra;
        }

        /*
         * Ensure the current partial page belongs to this submit chain rather
         * than other concurrent submit chains or the noio(bypass) chain since
         * those chains are handled asynchronously, and thus the page cannot be
         * used for inplace I/O or bvpage (should be processed in strict order.)
         */
        tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
        exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE)));
        if (cur)
                tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);

        err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
                                        .page = page,
                                        .offset = offset - map->m_la,
                                        .end = end,
                                  }), exclusive);
        if (err)
                goto out;

        z_erofs_onlinepage_split(page);
        if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
                fe->pcl->multibases = true;
        if (fe->pcl->length < offset + end - map->m_la) {
                fe->pcl->length = offset + end - map->m_la;
                fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
        }
        if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
            !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
            fe->pcl->length == map->m_llen)
                fe->pcl->partial = false;
next_part:
        /* shorten the remaining extent to update progress */
        map->m_llen = offset + cur - map->m_la;
        map->m_flags &= ~EROFS_MAP_FULL_MAPPED;

        end = cur;
        if (end > 0)
                goto repeat;

out:
        z_erofs_onlinepage_endio(page, err);
        return err;
}

static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
                                       unsigned int readahead_pages)
{
        /* auto: enable for read_folio, disable for readahead */
        if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
            !readahead_pages)
                return true;

        if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
            (readahead_pages <= sbi->opt.max_sync_decompress_pages))
                return true;

        return false;
}
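
/*
 * For example, a lone read_folio (readahead_pages == 0) decompresses
 * synchronously under the "auto" policy, while readahead does so only if
 * "force_on" is set and the window fits within max_sync_decompress_pages.
 */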

static bool z_erofs_page_is_invalidated(struct page *page)
{
        return !page->mapping && !z_erofs_is_shortlived_page(page);
}

struct z_erofs_decompress_backend {
        struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
        struct super_block *sb;
        struct z_erofs_pcluster *pcl;

        /* pages with the longest decompressed length for deduplication */
        struct page **decompressed_pages;
        /* pages to keep the compressed data */
        struct page **compressed_pages;

        struct list_head decompressed_secondary_bvecs;
        struct page **pagepool;
        unsigned int onstack_used, nr_pages;
};

struct z_erofs_bvec_item {
        struct z_erofs_bvec bvec;
        struct list_head list;
};

static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
                                         struct z_erofs_bvec *bvec)
{
        struct z_erofs_bvec_item *item;
        unsigned int pgnr;

        if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
            (bvec->end == PAGE_SIZE ||
             bvec->offset + bvec->end == be->pcl->length)) {
                pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
                DBG_BUGON(pgnr >= be->nr_pages);
                if (!be->decompressed_pages[pgnr]) {
                        be->decompressed_pages[pgnr] = bvec->page;
                        return;
                }
        }

        /* (cold path) one pcluster is requested multiple times */
        item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
        item->bvec = *bvec;
        list_add(&item->list, &be->decompressed_secondary_bvecs);
}
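
/*
 * In short, only bvecs that start page-aligned in the output and span a
 * whole page (or end the pcluster) can be decompressed into directly;
 * everything else takes the memcpy slow path via the secondary list in
 * z_erofs_fill_other_copies() below.
 */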

static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
                                      int err)
{
        unsigned int off0 = be->pcl->pageofs_out;
        struct list_head *p, *n;

        list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
                struct z_erofs_bvec_item *bvi;
                unsigned int end, cur;
                void *dst, *src;

                bvi = container_of(p, struct z_erofs_bvec_item, list);
                cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
                end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
                            bvi->bvec.end);
                dst = kmap_local_page(bvi->bvec.page);
                while (cur < end) {
                        unsigned int pgnr, scur, len;

                        pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
                        DBG_BUGON(pgnr >= be->nr_pages);

                        scur = bvi->bvec.offset + cur -
                                        ((pgnr << PAGE_SHIFT) - off0);
                        len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
                        if (!be->decompressed_pages[pgnr]) {
                                err = -EFSCORRUPTED;
                                cur += len;
                                continue;
                        }
                        src = kmap_local_page(be->decompressed_pages[pgnr]);
                        memcpy(dst + cur, src + scur, len);
                        kunmap_local(src);
                        cur += len;
                }
                kunmap_local(dst);
                z_erofs_onlinepage_endio(bvi->bvec.page, err);
                list_del(p);
                kfree(bvi);
        }
}

static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
        struct z_erofs_pcluster *pcl = be->pcl;
        struct z_erofs_bvec_iter biter;
        struct page *old_bvpage;
        int i;

        z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
        for (i = 0; i < pcl->vcnt; ++i) {
                struct z_erofs_bvec bvec;

                z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

                if (old_bvpage)
                        z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

                DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
                z_erofs_do_decompressed_bvec(be, &bvec);
        }

        old_bvpage = z_erofs_bvec_iter_end(&biter);
        if (old_bvpage)
                z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}

static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
                                  bool *overlapped)
{
        struct z_erofs_pcluster *pcl = be->pcl;
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        int i, err = 0;

        *overlapped = false;
        for (i = 0; i < pclusterpages; ++i) {
                struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
                struct page *page = bvec->page;

                /* compressed data ought to be valid before decompressing */
                if (!page) {
                        err = -EIO;
                        continue;
                }
                be->compressed_pages[i] = page;

                if (z_erofs_is_inline_pcluster(pcl) ||
                    erofs_page_is_managed(EROFS_SB(be->sb), page)) {
                        if (!PageUptodate(page))
                                err = -EIO;
                        continue;
                }

                DBG_BUGON(z_erofs_page_is_invalidated(page));
                if (z_erofs_is_shortlived_page(page))
                        continue;
                z_erofs_do_decompressed_bvec(be, bvec);
                *overlapped = true;
        }
        return err;
}

static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
                                       int err)
{
        struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
        struct z_erofs_pcluster *pcl = be->pcl;
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        const struct z_erofs_decompressor *decomp =
                                &erofs_decompressors[pcl->algorithmformat];
        int i, err2;
        struct page *page;
        bool overlapped;

        mutex_lock(&pcl->lock);
        be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;

        /* allocate (de)compressed page arrays if cannot be kept on stack */
        be->decompressed_pages = NULL;
        be->compressed_pages = NULL;
        be->onstack_used = 0;
        if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
                be->decompressed_pages = be->onstack_pages;
                be->onstack_used = be->nr_pages;
                memset(be->decompressed_pages, 0,
                       sizeof(struct page *) * be->nr_pages);
        }

        if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
                be->compressed_pages = be->onstack_pages + be->onstack_used;

        if (!be->decompressed_pages)
                be->decompressed_pages =
                        kvcalloc(be->nr_pages, sizeof(struct page *),
                                 GFP_KERNEL | __GFP_NOFAIL);
        if (!be->compressed_pages)
                be->compressed_pages =
                        kvcalloc(pclusterpages, sizeof(struct page *),
                                 GFP_KERNEL | __GFP_NOFAIL);

        z_erofs_parse_out_bvecs(be);
        err2 = z_erofs_parse_in_bvecs(be, &overlapped);
        if (err2)
                err = err2;
        if (!err)
                err = decomp->decompress(&(struct z_erofs_decompress_req) {
                                        .sb = be->sb,
                                        .in = be->compressed_pages,
                                        .out = be->decompressed_pages,
                                        .pageofs_in = pcl->pageofs_in,
                                        .pageofs_out = pcl->pageofs_out,
                                        .inputsize = pcl->pclustersize,
                                        .outputsize = pcl->length,
                                        .alg = pcl->algorithmformat,
                                        .inplace_io = overlapped,
                                        .partial_decoding = pcl->partial,
                                        .fillgaps = pcl->multibases,
                                        .gfp = pcl->besteffort ?
                                                GFP_KERNEL | __GFP_NOFAIL :
                                                GFP_NOWAIT | __GFP_NORETRY
                                 }, be->pagepool);

        /* must handle all compressed pages before actual file pages */
        if (z_erofs_is_inline_pcluster(pcl)) {
                page = pcl->compressed_bvecs[0].page;
                WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
                put_page(page);
        } else {
                for (i = 0; i < pclusterpages; ++i) {
                        /* consider shortlived pages added when decompressing */
                        page = be->compressed_pages[i];

                        if (!page || erofs_page_is_managed(sbi, page))
                                continue;
                        (void)z_erofs_put_shortlivedpage(be->pagepool, page);
                        WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                }
        }
        if (be->compressed_pages < be->onstack_pages ||
            be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
                kvfree(be->compressed_pages);
        z_erofs_fill_other_copies(be, err);

        for (i = 0; i < be->nr_pages; ++i) {
                page = be->decompressed_pages[i];
                if (!page)
                        continue;

                DBG_BUGON(z_erofs_page_is_invalidated(page));

                /* recycle all individual short-lived pages */
                if (z_erofs_put_shortlivedpage(be->pagepool, page))
                        continue;
                z_erofs_onlinepage_endio(page, err);
        }

        if (be->decompressed_pages != be->onstack_pages)
                kvfree(be->decompressed_pages);

        pcl->length = 0;
        pcl->partial = true;
        pcl->multibases = false;
        pcl->besteffort = false;
        pcl->bvset.nextpage = NULL;
        pcl->vcnt = 0;

        /* pcluster lock MUST be taken before the following line */
        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
        mutex_unlock(&pcl->lock);
        return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                                     struct page **pagepool)
{
        struct z_erofs_decompress_backend be = {
                .sb = io->sb,
                .pagepool = pagepool,
                .decompressed_secondary_bvecs =
                        LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
        };
        z_erofs_next_pcluster_t owned = io->head;

        while (owned != Z_EROFS_PCLUSTER_TAIL) {
                DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

                be.pcl = container_of(owned, struct z_erofs_pcluster, next);
                owned = READ_ONCE(be.pcl->next);

                z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
                if (z_erofs_is_inline_pcluster(be.pcl))
                        z_erofs_free_pcluster(be.pcl);
                else
                        erofs_workgroup_put(&be.pcl->obj);
        }
}
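
/*
 * Note that the loop above fetches ->next before decompressing, since
 * z_erofs_decompress_pcluster() resets the field to NIL and may free or
 * release the pcluster right away.
 */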

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
        struct z_erofs_decompressqueue *bgq =
                container_of(work, struct z_erofs_decompressqueue, u.work);
        struct page *pagepool = NULL;

        DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
        z_erofs_decompress_queue(bgq, &pagepool);
        erofs_release_pages(&pagepool);
        kvfree(bgq);
}

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
        z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
                                       int bios)
{
        struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

        /* wake up the caller thread for sync decompression */
        if (io->sync) {
                if (!atomic_add_return(bios, &io->pending_bios))
                        complete(&io->u.done);
                return;
        }

        if (atomic_add_return(bios, &io->pending_bios))
                return;
        /* Use (kthread_)work and sync decompression for atomic contexts only */
        if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
                struct kthread_worker *worker;

                rcu_read_lock();
                worker = rcu_dereference(
                                z_erofs_pcpu_workers[raw_smp_processor_id()]);
                if (!worker) {
                        INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
                        queue_work(z_erofs_workqueue, &io->u.work);
                } else {
                        kthread_queue_work(worker, &io->u.kthread_work);
                }
                rcu_read_unlock();
#else
                queue_work(z_erofs_workqueue, &io->u.work);
#endif
                /* enable sync decompression for readahead */
                if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
                        sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
                return;
        }
        z_erofs_decompressqueue_work(&io->u.work);
}

static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
                                 struct z_erofs_decompress_frontend *f,
                                 struct z_erofs_pcluster *pcl,
                                 unsigned int nr,
                                 struct address_space *mc)
{
        gfp_t gfp = mapping_gfp_mask(mc);
        bool tocache = false;
        struct z_erofs_bvec zbv;
        struct address_space *mapping;
        struct page *page;
        int justfound, bs = i_blocksize(f->inode);

        /* Except for inplace pages, the entire page can be used for I/Os */
        bvec->bv_offset = 0;
        bvec->bv_len = PAGE_SIZE;
repeat:
        spin_lock(&pcl->obj.lockref.lock);
        zbv = pcl->compressed_bvecs[nr];
        page = zbv.page;
        justfound = (unsigned long)page & 1UL;
        page = (struct page *)((unsigned long)page & ~1UL);
        pcl->compressed_bvecs[nr].page = page;
        spin_unlock(&pcl->obj.lockref.lock);
        if (!page)
                goto out_allocpage;

        bvec->bv_page = page;
        DBG_BUGON(z_erofs_is_shortlived_page(page));
        /*
         * Handle preallocated cached pages.  We tried to allocate such pages
         * without triggering direct reclaim.  If allocation failed, inplace
         * file-backed pages will be used instead.
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
                set_page_private(page, 0);
                tocache = true;
                goto out_tocache;
        }

        mapping = READ_ONCE(page->mapping);
        /*
         * File-backed pages for inplace I/Os are all locked steadily,
         * therefore it is impossible for `mapping` to be NULL.
         */
        if (mapping && mapping != mc) {
                if (zbv.offset < 0)
                        bvec->bv_offset = round_up(-zbv.offset, bs);
                bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
                return;
        }
1473
1474         lock_page(page);
1475         /* only true if page reclaim goes wrong, should never happen */
1476         DBG_BUGON(justfound && PagePrivate(page));
1477
1478         /* the cached page is still in managed cache */
1479         if (page->mapping == mc) {
1480                 /*
1481                  * The cached page is still available but without a valid
1482                  * `->private` pcluster hint.  Let's reconnect them.
1483                  */
1484                 if (!PagePrivate(page)) {
1485                         DBG_BUGON(!justfound);
1486                         /* compressed_bvecs[] already takes a ref */
1487                         attach_page_private(page, pcl);
1488                         put_page(page);
1489                 }
1490
1491                 /* no need to submit if it is already up-to-date */
1492                 if (PageUptodate(page)) {
1493                         unlock_page(page);
1494                         bvec->bv_page = NULL;
1495                 }
1496                 return;
1497         }
1498
1499         /*
1500          * It has been truncated, so it's unsafe to reuse this one. Let's
1501          * allocate a new page for compressed data.
1502          */
1503         DBG_BUGON(page->mapping);
1504         DBG_BUGON(!justfound);
1505
1506         tocache = true;
1507         unlock_page(page);
1508         put_page(page);
1509 out_allocpage:
1510         page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
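        /* recheck under the lock whether another thread has filled this slot */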
        spin_lock(&pcl->obj.lockref.lock);
        if (pcl->compressed_bvecs[nr].page) {
                erofs_pagepool_add(&f->pagepool, page);
                spin_unlock(&pcl->obj.lockref.lock);
                cond_resched();
                goto repeat;
        }
        pcl->compressed_bvecs[nr].page = page;
        spin_unlock(&pcl->obj.lockref.lock);
        bvec->bv_page = page;
out_tocache:
        if (!tocache || bs != PAGE_SIZE ||
            add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) {
                /* turn into a temporary shortlived page (1 ref) */
                set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
                return;
        }
        attach_page_private(page, pcl);
        /* drop the extra refcount taken by erofs_allocpage (2 refs in total here) */
        put_page(page);
}

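/*
 * Set up a decompression jobqueue: either the caller-provided foreground
 * queue (fgq), which is waited on with a completion, or a freshly
 * allocated background queue processed by a workqueue or per-CPU kthread.
 */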
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
                              struct z_erofs_decompressqueue *fgq, bool *fg)
{
        struct z_erofs_decompressqueue *q;

        if (fg && !*fg) {
                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
                if (!q) {
                        *fg = true;
                        goto fg_out;
                }
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
                kthread_init_work(&q->u.kthread_work,
                                  z_erofs_decompressqueue_kthread_work);
#else
                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
        } else {
fg_out:
                q = fgq;
                init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
                q->eio = false;
                q->sync = true;
        }
        q->sb = sb;
        q->head = Z_EROFS_PCLUSTER_TAIL;
        return q;
}

/* define decompression jobqueue types */
enum {
        JQ_BYPASS,
        JQ_SUBMIT,
        NR_JOBQUEUES,
};

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
                                    z_erofs_next_pcluster_t qtail[],
                                    z_erofs_next_pcluster_t owned_head)
{
        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

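        /*
         * Splice the pcluster out of the submission chain and append it
         * to the tail of the bypass chain.
         */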
        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);

        WRITE_ONCE(*submit_qtail, owned_head);
        WRITE_ONCE(*bypass_qtail, &pcl->next);

        qtail[JQ_BYPASS] = &pcl->next;
}

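/*
 * Read-bio completion handler: on success, mark managed-cache pages
 * uptodate and unlock them; on failure, record the error on the queue.
 * The last completing bio kicks off decompression.
 */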
static void z_erofs_submissionqueue_endio(struct bio *bio)
{
        struct z_erofs_decompressqueue *q = bio->bi_private;
        blk_status_t err = bio->bi_status;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                DBG_BUGON(PageUptodate(page));
                DBG_BUGON(z_erofs_page_is_invalidated(page));
                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
                        if (!err)
                                SetPageUptodate(page);
                        unlock_page(page);
                }
        }
        if (err)
                q->eio = true;
        z_erofs_decompress_kickoff(q, -1);
        bio_put(bio);
}

static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                                 struct z_erofs_decompressqueue *fgq,
                                 bool *force_fg, bool readahead)
{
        struct super_block *sb = f->inode->i_sb;
        struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
        z_erofs_next_pcluster_t owned_head = f->owned_head;
        /* bio is NULL initially, so no need to initialize last_{pa,bdev} */
        erofs_off_t last_pa;
        struct block_device *last_bdev;
        unsigned int nr_bios = 0;
        struct bio *bio = NULL;
        unsigned long pflags;
        int memstall = 0;

        /* No need to read from the device for pclusters in the bypass queue. */
        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);

        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

        /* by default, all pclusters need I/O submission */
        q[JQ_SUBMIT]->head = owned_head;

        do {
                struct erofs_map_dev mdev;
                struct z_erofs_pcluster *pcl;
                erofs_off_t cur, end;
                struct bio_vec bvec;
                unsigned int i = 0;
                bool bypass = true;

                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
                pcl = container_of(owned_head, struct z_erofs_pcluster, next);
                owned_head = READ_ONCE(pcl->next);

                if (z_erofs_is_inline_pcluster(pcl)) {
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
                        continue;
                }

                /* no device ID is specified, so the mapping always succeeds */
                mdev = (struct erofs_map_dev) {
                        .m_pa = erofs_pos(sb, pcl->obj.index),
                };
                (void)erofs_map_dev(sb, &mdev);

                cur = mdev.m_pa;
                end = cur + pcl->pclustersize;
                do {
                        z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
                        if (!bvec.bv_page)
                                continue;

                        if (bio && (cur != last_pa ||
                                    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
                                submit_bio(bio);
                                if (memstall) {
                                        psi_memstall_leave(&pflags);
                                        memstall = 0;
                                }
                                bio = NULL;
                        }

                        if (unlikely(PageWorkingset(bvec.bv_page)) &&
                            !memstall) {
                                psi_memstall_enter(&pflags);
                                memstall = 1;
                        }

                        if (!bio) {
                                bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
                                                REQ_OP_READ, GFP_NOIO);
                                bio->bi_end_io = z_erofs_submissionqueue_endio;
                                bio->bi_iter.bi_sector = cur >> 9;
                                bio->bi_private = q[JQ_SUBMIT];
                                if (readahead)
                                        bio->bi_opf |= REQ_RAHEAD;
                                ++nr_bios;
                                last_bdev = mdev.m_bdev;
                        }

                        if (cur + bvec.bv_len > end)
                                bvec.bv_len = end - cur;
                        DBG_BUGON(bvec.bv_len < sb->s_blocksize);
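                        /* if the bio is full, submit it and retry with a fresh one */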
                        if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
                                          bvec.bv_offset))
                                goto submit_bio_retry;

                        last_pa = cur + bvec.bv_len;
                        bypass = false;
                } while ((cur += bvec.bv_len) < end);

                if (!bypass)
                        qtail[JQ_SUBMIT] = &pcl->next;
                else
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);

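        /* flush the last in-flight bio, if any */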
        if (bio) {
                submit_bio(bio);
                if (memstall)
                        psi_memstall_leave(&pflags);
        }

        /*
         * Although background decompression is preferred, nothing was
         * actually submitted, so don't kick off decompression; just free
         * the queue instead.
         */
        if (!*force_fg && !nr_bios) {
                kvfree(q[JQ_SUBMIT]);
                return;
        }
        z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}

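/*
 * Submit all collected pclusters and then decompress: pclusters in the
 * bypass queue are decompressed inline, while submitted ones are handled
 * either synchronously here or asynchronously on I/O completion.
 */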
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
                             bool force_fg, bool ra)
{
        struct z_erofs_decompressqueue io[NR_JOBQUEUES];

        if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
                return;
        z_erofs_submit_queue(f, io, &force_fg, ra);

        /* handle bypass queue (no i/o pclusters) immediately */
        z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);

        if (!force_fg)
                return;

        /* wait until all bios are completed */
        wait_for_completion_io(&io[JQ_SUBMIT].u.done);

        /* handle the synchronous decompress queue in the caller context */
        z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}

/*
 * Since partially-uptodate folios are not implemented yet, we have to
 * use approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                struct readahead_control *rac, bool backmost)
{
        struct inode *inode = f->inode;
        struct erofs_map_blocks *map = &f->map;
        erofs_off_t cur, end, headoffset = f->headoffset;
        int err;

        if (backmost) {
                if (rac)
                        end = headoffset + readahead_length(rac) - 1;
                else
                        end = headoffset + PAGE_SIZE - 1;
                map->m_la = end;
                err = z_erofs_map_blocks_iter(inode, map,
                                              EROFS_GET_BLOCKS_READMORE);
                if (err)
                        return;

                /* expand ra for the trailing edge if readahead */
                if (rac) {
                        cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
                        readahead_expand(rac, headoffset, cur - headoffset);
                        return;
                }
                end = round_up(end, PAGE_SIZE);
        } else {
                end = round_up(map->m_la, PAGE_SIZE);

                if (!map->m_llen)
                        return;
        }

        cur = map->m_la + map->m_llen - 1;
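        /* walk backwards page by page until the readmore boundary is hit */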
        while ((cur >= end) && (cur < i_size_read(inode))) {
                pgoff_t index = cur >> PAGE_SHIFT;
                struct page *page;

                page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
                if (page) {
                        if (PageUptodate(page))
                                unlock_page(page);
                        else
                                (void)z_erofs_do_read_page(f, page, !!rac);
                        put_page(page);
                }

                if (cur < PAGE_SIZE)
                        break;
                cur = (index << PAGE_SHIFT) - 1;
        }
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
        struct inode *const inode = folio->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        int err;

        trace_erofs_read_folio(folio, false);
        f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;

        z_erofs_pcluster_readmore(&f, NULL, true);
        err = z_erofs_do_read_page(&f, &folio->page, false);
        z_erofs_pcluster_readmore(&f, NULL, false);
        z_erofs_pcluster_end(&f);

        /* if some compressed clusters are ready, submit them anyway */
        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);

        if (err && err != -EINTR)
                erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
                          err, folio->index, EROFS_I(inode)->nid);

        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&f.pagepool);
        return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
        struct inode *const inode = rac->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        struct folio *head = NULL, *folio;
        unsigned int nr_folios;
        int err;

        f.headoffset = readahead_pos(rac);

        z_erofs_pcluster_readmore(&f, rac, true);
        nr_folios = readahead_count(rac);
        trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);

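        /* push the folios onto a LIFO list linked through ->private */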
        while ((folio = readahead_folio(rac))) {
                folio->private = head;
                head = folio;
        }

        /* traverse in reverse order for best metadata I/O performance */
        while (head) {
                folio = head;
                head = folio_get_private(folio);

                err = z_erofs_do_read_page(&f, &folio->page, true);
                if (err && err != -EINTR)
                        erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
                                  folio->index, EROFS_I(inode)->nid);
        }
        z_erofs_pcluster_readmore(&f, rac, false);
        z_erofs_pcluster_end(&f);

        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&f.pagepool);
}

const struct address_space_operations z_erofs_aops = {
        .read_folio = z_erofs_read_folio,
        .readahead = z_erofs_readahead,
};