// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

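/*
 * Completion handler for a read from the cache: hand the result straight on
 * to the common subrequest termination code.
 */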
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_read(subreq);
}

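/*
 * Illustrative sketch only (not part of this file's API): a filesystem's
 * ->issue_read() typically builds an iterator over the unread part of the
 * subrequest, starts its wire RPC and, once that RPC completes, reports the
 * result with netfs_subreq_terminated().  example_send_read_rpc() below is
 * hypothetical; the iov_iter and netfs calls are real.
 *
 *	static void example_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		struct iov_iter iter;
 *
 *		iov_iter_xarray(&iter, ITER_DEST,
 *				&subreq->rreq->mapping->i_pages,
 *				subreq->start + subreq->transferred,
 *				subreq->len - subreq->transferred);
 *		example_send_read_rpc(subreq, &iter);
 *	}
 */
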
/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			if (xas_retry(&xas, folio))
				continue;

			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */
			if (have_unlocked && folio_index(folio) <= unlocked)
				continue;
			unlocked = folio_index(folio);
			folio_end_fscache(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

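/*
 * Handle termination of a write to the cache made from a completed read
 * subrequest: account the result, drop the copy-op count and the subrequest
 * ref, and complete the request if this was the last outstanding copy.
 */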
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_copy_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

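/*
 * Work item that performs the deferred copy of downloaded data to the cache.
 */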
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock_folios(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}

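/*
 * Work item used when assessment of the request has to be deferred to process
 * context.
 */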
static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

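/*
 * Illustrative sketch only: a filesystem would typically call the exported
 * helper above from its transport completion path, once per subrequest,
 * passing either the number of bytes read, 0 to request a retry, or a
 * negative errno.  example_rpc_done() and struct example_rpc are
 * hypothetical; netfs_subreq_terminated() is the real entry point.
 *
 *	static void example_rpc_done(struct example_rpc *rpc, bool was_async)
 *	{
 *		struct netfs_io_subrequest *subreq = rpc->subreq;
 *
 *		netfs_subreq_terminated(subreq, rpc->result, was_async);
 *	}
 */
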
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						      loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq)
{
	enum netfs_io_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index	= (*_debug_index)++;
	subreq->start		= rreq->start + rreq->submitted;
	subreq->len		= rreq->len   - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then when we hand over the subset, it can choose to take a subset of
	 * that (the starts must coincide), in which case, we go around the loop
	 * again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	INIT_WORK(&rreq->work, netfs_rreq_work);

	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us, and the service code isn't punted off to a
		 * random thread pool to process.
		 */
		for (;;) {
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}
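
/*
 * Illustrative sketch only: netfs_begin_read() is driven from the buffered
 * read entry points, which build a request covering the folios the VM wants
 * and then hand it over.  Simplified, and with error handling and the exact
 * allocation-helper arguments elided, the synchronous flow resembles:
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, NETFS_READPAGE);
 *	if (!IS_ERR(rreq))
 *		ret = netfs_begin_read(rreq, true);
 */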