// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	size_t account = 0;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		loff_t pg_end;
		bool pg_failed = false;
		bool folio_started;

		if (xas_retry(&xas, folio))
			continue;

		pg_end = folio_pos(folio) + folio_size(folio) - 1;

		folio_started = false;
		for (;;) {
			loff_t sreq_end;

			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
				folio_start_fscache(folio);
				folio_started = true;
			}
			pg_failed |= subreq_failed;
			sreq_end = subreq->start + subreq->len - 1;
			if (pg_end < sreq_end)
				break;

			account += subreq->transferred;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}

			if (pg_end == sreq_end)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}
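
/*
 * Illustrative sketch (names hypothetical): a netfs that wants readahead
 * rounded out to its RPC or cache granule size could implement the
 * ->expand_readahead() hook consulted above roughly as:
 *
 *	static void myfs_expand_readahead(struct netfs_io_request *rreq)
 *	{
 *		loff_t start = round_down(rreq->start, MYFS_GRANULE_SIZE);
 *
 *		rreq->len += rreq->start - start;
 *		rreq->start = start;
 *		rreq->len = round_up(rreq->len, MYFS_GRANULE_SIZE);
 *	}
 *
 * The expanded request still contains the original region, as required.
 */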

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
	 */
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
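
/*
 * Illustrative sketch (names hypothetical): the ->begin_cache_operation()
 * hook consulted above is how the netfs attaches its fscache cookie to the
 * request so that reads can be satisfied from the cache, e.g.:
 *
 *	static int myfs_begin_cache_operation(struct netfs_io_request *rreq)
 *	{
 *		return fscache_begin_read_operation(&rreq->cache_resources,
 *						    myfs_cookie(rreq->inode));
 *	}
 */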

/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_read_folio);
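
/*
 * Illustrative sketch (structure name hypothetical): netfs_readahead() and
 * netfs_read_folio() are designed to be wired directly into a network
 * filesystem's address_space_operations:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.read_folio	= netfs_read_folio,
 *		.readahead	= netfs_readahead,
 *	};
 */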

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				  bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}
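
/*
 * For example (hypothetical numbers): with a 4KiB folio lying entirely beyond
 * EOF, a 512-byte write at offset 1024 into that folio hits the "entirely
 * beyond the end of the file" case above, so bytes 0-1023 and 1536-4095 are
 * zeroed, true is returned and the caller can skip issuing a read.
 */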

/**
 * netfs_write_begin - Helper to prepare for writing
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct netfs_inode *ctx,
		      struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, struct folio **_folio,
		      void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct folio *folio;
	unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	folio = __filemap_get_folio(mapping, index, fgp_flags,
				    mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;

	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (e.g. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			goto error;
		}
		if (!folio)
			goto retry;
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
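
/*
 * Illustrative sketch (names hypothetical): a filesystem with no special
 * write_begin requirements might call the helper above from its
 * ->write_begin() aop roughly as follows, with myfs_i() being the
 * filesystem's own netfs_inode accessor:
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned int len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(myfs_i(mapping->host), file, mapping,
 *					pos, len, &folio, fsdata);
 *		if (ret == 0)
 *			*pagep = folio_file_page(folio, pos / PAGE_SIZE);
 *		return ret;
 *	}
 */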