// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"
8 static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
9 loff_t start, size_t len)
11 struct netfs_io_request *rreq;
13 rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
15 return ERR_PTR(-ENOMEM);
19 rreq->mapping = mapping;
20 rreq->inode = mapping->host;
21 INIT_LIST_HEAD(&rreq->subrequests);
22 refcount_set(&rreq->ref, 1);
26 static void erofs_fscache_put_request(struct netfs_io_request *rreq)
28 if (!refcount_dec_and_test(&rreq->ref))
30 if (rreq->cache_resources.ops)
31 rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
35 static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
37 if (!refcount_dec_and_test(&subreq->ref))
39 erofs_fscache_put_request(subreq->rreq);
43 static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
45 struct netfs_io_subrequest *subreq;
47 while (!list_empty(&rreq->subrequests)) {
48 subreq = list_first_entry(&rreq->subrequests,
49 struct netfs_io_subrequest, rreq_link);
50 list_del(&subreq->rreq_link);
51 erofs_fscache_put_subrequest(subreq);
55 static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
57 struct netfs_io_subrequest *subreq;
59 unsigned int iopos = 0;
60 pgoff_t start_page = rreq->start / PAGE_SIZE;
61 pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
62 bool subreq_failed = false;
64 XA_STATE(xas, &rreq->mapping->i_pages, start_page);
66 subreq = list_first_entry(&rreq->subrequests,
67 struct netfs_io_subrequest, rreq_link);
68 subreq_failed = (subreq->error < 0);
71 xas_for_each(&xas, folio, last_page) {
73 (folio_index(folio) - start_page) * PAGE_SIZE;
74 unsigned int pgend = pgpos + folio_size(folio);
75 bool pg_failed = false;
83 pg_failed |= subreq_failed;
84 if (pgend < iopos + subreq->len)
88 if (!list_is_last(&subreq->rreq_link,
89 &rreq->subrequests)) {
90 subreq = list_next_entry(subreq, rreq_link);
91 subreq_failed = (subreq->error < 0);
94 subreq_failed = false;
101 folio_mark_uptodate(folio);
/* Finish @rreq: settle its folios, free the subrequest list, drop the last ref. */
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}
115 static void erofc_fscache_subreq_complete(void *priv,
116 ssize_t transferred_or_error, bool was_async)
118 struct netfs_io_subrequest *subreq = priv;
119 struct netfs_io_request *rreq = subreq->rreq;
121 if (IS_ERR_VALUE(transferred_or_error))
122 subreq->error = transferred_or_error;
124 if (atomic_dec_and_test(&rreq->nr_outstanding))
125 erofs_fscache_rreq_complete(rreq);
127 erofs_fscache_put_subrequest(subreq);
131 * Read data from fscache and fill the read data into page cache described by
132 * @rreq, which shall be both aligned with PAGE_SIZE. @pstart describes
133 * the start physical address in the cache file.
135 static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
136 struct netfs_io_request *rreq, loff_t pstart)
138 enum netfs_io_source source;
139 struct super_block *sb = rreq->mapping->host->i_sb;
140 struct netfs_io_subrequest *subreq;
141 struct netfs_cache_resources *cres = &rreq->cache_resources;
142 struct iov_iter iter;
143 loff_t start = rreq->start;
144 size_t len = rreq->len;
148 atomic_set(&rreq->nr_outstanding, 1);
150 ret = fscache_begin_read_operation(cres, cookie);
155 subreq = kzalloc(sizeof(struct netfs_io_subrequest),
158 INIT_LIST_HEAD(&subreq->rreq_link);
159 refcount_set(&subreq->ref, 2);
161 refcount_inc(&rreq->ref);
167 subreq->start = pstart + done;
168 subreq->len = len - done;
169 subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
171 list_add_tail(&subreq->rreq_link, &rreq->subrequests);
173 source = cres->ops->prepare_read(subreq, LLONG_MAX);
174 if (WARN_ON(subreq->len == 0))
175 source = NETFS_INVALID_READ;
176 if (source != NETFS_READ_FROM_CACHE) {
177 erofs_err(sb, "failed to fscache prepare_read (source %d)",
181 erofs_fscache_put_subrequest(subreq);
185 atomic_inc(&rreq->nr_outstanding);
187 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
188 start + done, subreq->len);
190 ret = fscache_read(cres, subreq->start, &iter,
191 NETFS_READ_HOLE_FAIL,
192 erofc_fscache_subreq_complete, subreq);
193 if (ret == -EIOCBQUEUED)
196 erofs_err(sb, "failed to fscache_read (ret %d)", ret);
203 if (atomic_dec_and_test(&rreq->nr_outstanding))
204 erofs_fscache_rreq_complete(rreq);
209 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
212 struct super_block *sb = folio_mapping(folio)->host->i_sb;
213 struct netfs_io_request *rreq;
214 struct erofs_map_dev mdev = {
216 .m_pa = folio_pos(folio),
219 ret = erofs_map_dev(sb, &mdev);
223 rreq = erofs_fscache_alloc_request(folio_mapping(folio),
224 folio_pos(folio), folio_size(folio));
228 return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
235 static int erofs_fscache_read_folio_inline(struct folio *folio,
236 struct erofs_map_blocks *map)
238 struct super_block *sb = folio_mapping(folio)->host->i_sb;
239 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
244 /* For tail packing layout, the offset may be non-zero. */
245 offset = erofs_blkoff(map->m_pa);
246 blknr = erofs_blknr(map->m_pa);
249 src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
253 dst = kmap_local_folio(folio, 0);
254 memcpy(dst, src + offset, len);
255 memset(dst + len, 0, PAGE_SIZE - len);
258 erofs_put_metabuf(&buf);
262 static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
264 struct inode *inode = folio_mapping(folio)->host;
265 struct super_block *sb = inode->i_sb;
266 struct erofs_map_blocks map;
267 struct erofs_map_dev mdev;
268 struct netfs_io_request *rreq;
273 DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);
275 pos = folio_pos(folio);
278 ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
282 if (!(map.m_flags & EROFS_MAP_MAPPED)) {
283 folio_zero_range(folio, 0, folio_size(folio));
287 if (map.m_flags & EROFS_MAP_META) {
288 ret = erofs_fscache_read_folio_inline(folio, &map);
292 mdev = (struct erofs_map_dev) {
293 .m_deviceid = map.m_deviceid,
297 ret = erofs_map_dev(sb, &mdev);
302 rreq = erofs_fscache_alloc_request(folio_mapping(folio),
303 folio_pos(folio), folio_size(folio));
307 pstart = mdev.m_pa + (pos - map.m_la);
308 return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
313 folio_mark_uptodate(folio);
319 static void erofs_fscache_advance_folios(struct readahead_control *rac,
320 size_t len, bool unlock)
323 struct folio *folio = readahead_folio(rac);
324 len -= folio_size(folio);
326 folio_mark_uptodate(folio);
332 static void erofs_fscache_readahead(struct readahead_control *rac)
334 struct inode *inode = rac->mapping->host;
335 struct super_block *sb = inode->i_sb;
336 size_t len, count, done = 0;
338 loff_t start, offset;
341 if (!readahead_count(rac))
344 start = readahead_pos(rac);
345 len = readahead_length(rac);
348 struct erofs_map_blocks map;
349 struct erofs_map_dev mdev;
350 struct netfs_io_request *rreq;
355 ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
359 offset = start + done;
360 count = min_t(size_t, map.m_llen - (pos - map.m_la),
363 if (!(map.m_flags & EROFS_MAP_MAPPED)) {
364 struct iov_iter iter;
366 iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
368 iov_iter_zero(count, &iter);
370 erofs_fscache_advance_folios(rac, count, true);
375 if (map.m_flags & EROFS_MAP_META) {
376 struct folio *folio = readahead_folio(rac);
378 ret = erofs_fscache_read_folio_inline(folio, &map);
380 folio_mark_uptodate(folio);
381 ret = folio_size(folio);
388 mdev = (struct erofs_map_dev) {
389 .m_deviceid = map.m_deviceid,
392 ret = erofs_map_dev(sb, &mdev);
396 rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
400 * Drop the ref of folios here. Unlock them in
401 * rreq_unlock_folios() when rreq complete.
403 erofs_fscache_advance_folios(rac, count, false);
404 ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
405 rreq, mdev.m_pa + (pos - map.m_la));
408 } while (ret > 0 && ((done += ret) < len));
411 static const struct address_space_operations erofs_fscache_meta_aops = {
412 .read_folio = erofs_fscache_meta_read_folio,
415 const struct address_space_operations erofs_fscache_access_aops = {
416 .read_folio = erofs_fscache_read_folio,
417 .readahead = erofs_fscache_readahead,
420 int erofs_fscache_register_cookie(struct super_block *sb,
421 struct erofs_fscache **fscache,
422 char *name, bool need_inode)
424 struct fscache_volume *volume = EROFS_SB(sb)->volume;
425 struct erofs_fscache *ctx;
426 struct fscache_cookie *cookie;
429 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
433 cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
434 name, strlen(name), NULL, 0, 0);
436 erofs_err(sb, "failed to get cookie for %s", name);
441 fscache_use_cookie(cookie, false);
442 ctx->cookie = cookie;
445 struct inode *const inode = new_inode(sb);
448 erofs_err(sb, "failed to get anon inode for %s", name);
454 inode->i_size = OFFSET_MAX;
455 inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
456 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
465 fscache_unuse_cookie(ctx->cookie, NULL, NULL);
466 fscache_relinquish_cookie(ctx->cookie, false);
473 void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
475 struct erofs_fscache *ctx = *fscache;
480 fscache_unuse_cookie(ctx->cookie, NULL, NULL);
481 fscache_relinquish_cookie(ctx->cookie, false);
491 int erofs_fscache_register_fs(struct super_block *sb)
493 struct erofs_sb_info *sbi = EROFS_SB(sb);
494 struct fscache_volume *volume;
498 name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
502 volume = fscache_acquire_volume(name, NULL, NULL, 0);
503 if (IS_ERR_OR_NULL(volume)) {
504 erofs_err(sb, "failed to register volume for %s", name);
505 ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
509 sbi->volume = volume;
514 void erofs_fscache_unregister_fs(struct super_block *sb)
516 struct erofs_sb_info *sbi = EROFS_SB(sb);
518 fscache_relinquish_volume(sbi->volume, NULL, false);