// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"
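/*
 * The helpers below hand-roll netfs_io_request/netfs_io_subrequest
 * objects and drive fscache_read() directly, rather than going through
 * the higher-level netfs read helpers: erofs only needs the raw
 * request/subrequest bookkeeping for its on-demand read path.
 */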
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	rreq->inode	= mapping->host;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}
static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}
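/*
 * Subrequest lifetime: each subrequest starts with a refcount of 2 (one
 * held via rreq->subrequests and dropped in
 * erofs_fscache_clear_subrequests(), one for the read completion
 * callback) and pins its parent request, so the rreq cannot go away
 * while any subrequest is still live.
 */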
static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}
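/*
 * Unlock all folios covered by @rreq: walk the mapping's xarray in step
 * with the ordered subrequest list and mark a folio uptodate only if
 * every subrequest overlapping it completed without error. This is
 * modelled on the folio-unlock logic of the netfs library.
 */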
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}
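/*
 * Completion callback passed to fscache_read(). rreq->nr_outstanding
 * carries a bias of one taken by the submitter, so the request is
 * finalized by whichever side (submission loop or last completing
 * subrequest) drops the count to zero.
 */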
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}
/*
 * Read data from fscache into the page cache described by @rreq; both
 * @rreq->start and @rreq->len shall be PAGE_SIZE aligned. @pstart is
 * the physical start address within the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	/* Bias the outstanding count so the rreq outlives the submission loop. */
	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (subreq) {
			INIT_LIST_HEAD(&subreq->rreq_link);
			refcount_set(&subreq->ref, 2);
			subreq->rreq = rreq;
			refcount_inc(&rreq->ref);
		} else {
			ret = -ENOMEM;
			goto out;
		}

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	/* Drop the bias; finalizes the rreq if all reads already completed. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	int ret;
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}
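/*
 * Read into page cache for the inline (tail-packed) layout: the data
 * lives inside an erofs metadata block, so it is copied out of the
 * metabuf instead of being read through fscache.
 */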
static int erofs_fscache_read_folio_inline(struct folio *folio,
					 struct erofs_map_blocks *map)
{
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blknr;
	size_t offset, len;
	void *src, *dst;

	/* For tail packing layout, the offset may be non-zero. */
	offset = erofs_blkoff(map->m_pa);
	blknr = erofs_blknr(map->m_pa);
	len = map->m_llen;

	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
	if (IS_ERR(src))
		return PTR_ERR(src);

	dst = kmap_local_folio(folio, 0);
	memcpy(dst, src + offset, len);
	memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_local(dst);

	erofs_put_metabuf(&buf);
	return 0;
}
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio_mapping(folio)->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct netfs_io_request *rreq;
	erofs_off_t pos;
	loff_t pstart;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	pos = folio_pos(folio);
	map.m_la = pos;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		goto out_unlock;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_uptodate;
	}

	if (map.m_flags & EROFS_MAP_META) {
		ret = erofs_fscache_read_folio_inline(folio, &map);
		goto out_uptodate;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out_unlock;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out_unlock;
	}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, pstart);

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}
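/*
 * Advance the readahead window by @len bytes, dropping the readahead
 * reference on each folio. With @unlock == false the folios are left
 * locked for erofs_fscache_rreq_unlock_folios() to mark uptodate and
 * unlock once the async read completes.
 */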
static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);

		len -= folio_size(folio);
		if (unlock) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
}
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		/* Hole: zero the page cache directly and move on. */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_read_folio_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}
			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the ref of folios here. Unlock them in
		 * rreq_unlock_folios() when the rreq completes.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;
	} while (ret > 0 && ((done += ret) < len));
}
static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};
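/*
 * erofs_fscache_access_aops is non-static: it is presumably installed
 * on regular erofs inodes elsewhere when the filesystem runs in fscache
 * mode, while the meta aops serve only the anonymous metadata inode set
 * up in erofs_fscache_register_cookie() below.
 */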
int erofs_fscache_register_cookie(struct super_block *sb,
				  struct erofs_fscache **fscache,
				  char *name, bool need_inode)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (need_inode) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	*fscache = ctx;
	return 0;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;
err:
	kfree(ctx);
	return ret;
}
void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
	struct erofs_fscache *ctx = *fscache;

	if (!ctx)
		return;

	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;

	iput(ctx->inode);
	ctx->inode = NULL;

	kfree(ctx);
	*fscache = NULL;
}
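/*
 * One fscache volume named "erofs,<fsid>" represents a filesystem
 * instance; the cookies registered above are all acquired under this
 * volume.
 */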
int erofs_fscache_register_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}
void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}