1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Network filesystem high-level write support.
4 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
8 #include <linux/export.h>
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/pagevec.h>
17 * Determined write method. Adjust netfs_folio_traces if this is changed.
19 enum netfs_how_to_modify {
20 NETFS_FOLIO_IS_UPTODATE, /* Folio is uptodate already */
21 NETFS_JUST_PREFETCH, /* We have to read the folio anyway */
22 NETFS_WHOLE_FOLIO_MODIFY, /* We're going to overwrite the whole folio */
23 NETFS_MODIFY_AND_CLEAR, /* We can assume there is no data to be downloaded. */
24 NETFS_STREAMING_WRITE, /* Store incomplete data in non-uptodate page. */
25 NETFS_STREAMING_WRITE_CONT, /* Continue streaming write. */
26 NETFS_FLUSH_CONTENT, /* Flush incompatible content. */
29 static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
31 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
33 if (netfs_group && !folio_get_private(folio))
34 folio_attach_private(folio, netfs_get_group(netfs_group));
37 #if IS_ENABLED(CONFIG_FSCACHE)
38 static void netfs_folio_start_fscache(bool caching, struct folio *folio)
40 if (caching)
41 folio_start_fscache(folio);
44 static void netfs_folio_start_fscache(bool caching, struct folio *folio)
50 * Decide how we should modify a folio. We might be attempting to do
51 * write-streaming, in which case we don't want to do a local RMW cycle if we
52 * can avoid it. If we're doing local caching or content crypto, we award that
53 * priority over avoiding RMW. If the file is open for reading, then we also
54 * assume that we may want to read what we wrote.
56 static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
65 struct netfs_folio *finfo = netfs_folio_info(folio);
66 loff_t pos = folio_file_pos(folio);
70 if (netfs_folio_group(folio) != netfs_group)
71 return NETFS_FLUSH_CONTENT;
73 if (folio_test_uptodate(folio))
74 return NETFS_FOLIO_IS_UPTODATE;
76 if (pos >= ctx->zero_point)
77 return NETFS_MODIFY_AND_CLEAR;
79 if (!maybe_trouble && offset == 0 && len >= flen)
80 return NETFS_WHOLE_FOLIO_MODIFY;
82 if (file->f_mode & FMODE_READ)
83 goto no_write_streaming;
84 if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
85 goto no_write_streaming;
87 if (netfs_is_cache_enabled(ctx)) {
88 /* We don't want to get a streaming write on a file that loses
89 * caching service temporarily because the backing store got culled.
90 */
92 if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
93 set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
94 goto no_write_streaming;
98 return NETFS_STREAMING_WRITE;
100 /* We can continue a streaming write only if it continues on from the
101 * previous write. If it overlaps, we must flush lest we suffer a partial
102 * copy and disjoint dirty regions.
104 if (offset == finfo->dirty_offset + finfo->dirty_len)
105 return NETFS_STREAMING_WRITE_CONT;
106 return NETFS_FLUSH_CONTENT;
110 netfs_stat(&netfs_n_wh_wstream_conflict);
111 return NETFS_FLUSH_CONTENT;
113 return NETFS_JUST_PREFETCH;
117 * Grab a folio for writing and lock it. Attempt to allocate as large a folio
118 * as possible to hold as much of the remaining length as possible in one go.
120 static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
121 loff_t pos, size_t part)
123 pgoff_t index = pos / PAGE_SIZE;
124 fgf_t fgp_flags = FGP_WRITEBEGIN;
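/* If the mapping supports large folios, pass __filemap_get_folio() a size
 * hint below so that it can allocate a folio of high enough order to cover
 * as much of the write as possible in one go.
 */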
126 if (mapping_large_folio_support(mapping))
127 fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
129 return __filemap_get_folio(mapping, index, fgp_flags,
130 mapping_gfp_mask(mapping));
134 * netfs_perform_write - Copy data into the pagecache.
135 * @iocb: The operation parameters
136 * @iter: The source buffer
137 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
139 * Copy data into pagecache pages attached to the inode specified by @iocb.
140 * The caller must hold appropriate inode locks.
142 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
143 * to indicate the range modified. Dirty pages may also be tagged with a
144 * netfs-specific grouping such that data from an old group gets flushed before
145 * a new one is started.
147 ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
148 struct netfs_group *netfs_group)
150 struct file *file = iocb->ki_filp;
151 struct inode *inode = file_inode(file);
152 struct address_space *mapping = inode->i_mapping;
153 struct netfs_inode *ctx = netfs_inode(inode);
154 struct writeback_control wbc = {
155 .sync_mode = WB_SYNC_NONE,
157 .nr_to_write = LONG_MAX,
158 .range_start = iocb->ki_pos,
159 .range_end = iocb->ki_pos + iter->count,
161 struct netfs_io_request *wreq = NULL;
162 struct netfs_folio *finfo;
164 enum netfs_how_to_modify howto;
165 enum netfs_folio_trace trace;
166 unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0 : BDP_ASYNC;
167 ssize_t written = 0, ret, ret2;
168 loff_t i_size, pos = iocb->ki_pos, from, to;
169 size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
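/* maybe_trouble is set after a short copy from userspace so that we stop
 * assuming we can overwrite a whole folio without reading it first (see
 * netfs_how_to_modify() and the NETFS_WHOLE_FOLIO_MODIFY handling below).
 */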
170 bool maybe_trouble = false;
172 if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
173 iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
175 wbc_attach_fdatawrite_inode(&wbc, mapping->host);
177 ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
179 wbc_detach_inode(&wbc);
183 wreq = netfs_begin_writethrough(iocb, iter->count);
185 wbc_detach_inode(&wbc);
190 if (!is_sync_kiocb(iocb))
191 wreq->iocb = iocb;
192 wreq->cleanup = netfs_cleanup_buffered_write;
197 size_t offset; /* Offset into pagecache folio */
198 size_t part; /* Bytes to write to folio */
199 size_t copied; /* Bytes copied from user */
201 ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
202 if (unlikely(ret < 0))
205 offset = pos & (max_chunk - 1);
206 part = min(max_chunk - offset, iov_iter_count(iter));
208 /* Bring in the user pages that we will copy from _first_ lest
209 * we hit a nasty deadlock on copying from the same page as
210 * we're writing to, without it being marked uptodate.
212 * Not only is this an optimisation, but it is also required to
213 * check that the address is actually valid, when atomic
214 * usercopies are used below.
216 * We rely on the page being held onto long enough by the LRU
217 * that we can grab it below if this causes it to be read.
220 if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
223 folio = netfs_grab_folio_for_write(mapping, pos, part);
225 ret = PTR_ERR(folio);
229 flen = folio_size(folio);
230 offset = pos & (flen - 1);
231 part = min_t(size_t, flen - offset, part);
233 if (signal_pending(current)) {
234 ret = written ? -EINTR : -ERESTARTSYS;
235 goto error_folio_unlock;
238 /* See if we need to prefetch the area we're going to modify.
239 * We need to do this before we get a lock on the folio in case
240 * there's more than one writer competing for the same cache
241 * block.
242 */
243 howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
244 flen, offset, part, maybe_trouble);
245 _debug("howto %u", howto);
247 case NETFS_JUST_PREFETCH:
248 ret = netfs_prefetch_for_write(file, folio, offset, part);
249 if (ret < 0) {
250 _debug("prefetch = %zd", ret);
251 goto error_folio_unlock;
254 case NETFS_FOLIO_IS_UPTODATE:
255 case NETFS_WHOLE_FOLIO_MODIFY:
256 case NETFS_STREAMING_WRITE_CONT:
258 case NETFS_MODIFY_AND_CLEAR:
259 zero_user_segment(&folio->page, 0, offset);
261 case NETFS_STREAMING_WRITE:
263 if (WARN_ON(folio_get_private(folio)))
264 goto error_folio_unlock;
266 case NETFS_FLUSH_CONTENT:
267 trace_netfs_folio(folio, netfs_flush_content);
268 from = folio_pos(folio);
269 to = from + folio_size(folio) - 1;
272 ret = filemap_write_and_wait_range(mapping, from, to);
274 goto error_folio_unlock;
278 if (mapping_writably_mapped(mapping))
279 flush_dcache_folio(folio);
281 copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
283 flush_dcache_folio(folio);
285 /* Deal with a (partially) failed copy */
288 goto error_folio_unlock;
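/* The netfs_folio_trace values deliberately mirror netfs_how_to_modify
 * (see the note on the enum at the top of this file), so the howto value
 * can be used directly as the initial trace value here.
 */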
291 trace = (enum netfs_folio_trace)howto;
293 case NETFS_FOLIO_IS_UPTODATE:
294 case NETFS_JUST_PREFETCH:
295 netfs_set_group(folio, netfs_group);
297 case NETFS_MODIFY_AND_CLEAR:
298 zero_user_segment(&folio->page, offset + copied, flen);
299 netfs_set_group(folio, netfs_group);
300 folio_mark_uptodate(folio);
302 case NETFS_WHOLE_FOLIO_MODIFY:
303 if (unlikely(copied < part)) {
304 maybe_trouble = true;
305 iov_iter_revert(iter, copied);
309 netfs_set_group(folio, netfs_group);
310 folio_mark_uptodate(folio);
312 case NETFS_STREAMING_WRITE:
313 if (offset == 0 && copied == flen) {
314 netfs_set_group(folio, netfs_group);
315 folio_mark_uptodate(folio);
316 trace = netfs_streaming_filled_page;
319 finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
320 if (!finfo) {
321 iov_iter_revert(iter, copied);
322 ret = -ENOMEM;
323 goto error_folio_unlock;
325 finfo->netfs_group = netfs_get_group(netfs_group);
326 finfo->dirty_offset = offset;
327 finfo->dirty_len = copied;
328 folio_attach_private(folio, (void *)((unsigned long)finfo |
329 NETFS_FOLIO_INFO));
331 case NETFS_STREAMING_WRITE_CONT:
332 finfo = netfs_folio_info(folio);
333 finfo->dirty_len += copied;
334 if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
335 if (finfo->netfs_group)
336 folio_change_private(folio, finfo->netfs_group);
337 else
338 folio_detach_private(folio);
339 folio_mark_uptodate(folio);
341 trace = netfs_streaming_cont_filled_page;
345 WARN(true, "Unexpected modify type %u ix=%lx\n",
346 howto, folio->index);
348 goto error_folio_unlock;
351 trace_netfs_folio(folio, trace);
353 /* Update the inode size if we moved the EOF marker */
354 i_size = i_size_read(inode);
357 if (ctx->ops->update_i_size) {
358 ctx->ops->update_i_size(inode, pos);
359 } else {
360 i_size_write(inode, pos);
361 #if IS_ENABLED(CONFIG_FSCACHE)
362 fscache_update_cookie(ctx->cache, NULL, &pos);
369 folio_mark_dirty(folio);
371 if (folio_test_dirty(folio))
373 folio_clear_dirty_for_io(folio);
374 /* We make multiple writes to the folio... */
375 if (!folio_test_writeback(folio)) {
376 folio_wait_fscache(folio);
377 folio_start_writeback(folio);
378 folio_start_fscache(folio);
379 if (wreq->iter.count == 0)
380 trace_netfs_folio(folio, netfs_folio_trace_wthru);
381 else
382 trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
384 netfs_advance_writethrough(wreq, copied,
385 offset + copied == flen);
393 } while (iov_iter_count(iter));
396 if (unlikely(wreq)) {
397 ret2 = netfs_end_writethrough(wreq, iocb);
398 wbc_detach_inode(&wbc);
399 if (ret2 == -EIOCBQUEUED)
405 iocb->ki_pos += written;
406 _leave(" = %zd [%zd]", written, ret);
407 return written ? written : ret;
414 EXPORT_SYMBOL(netfs_perform_write);
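/*
 * Illustrative sketch (hypothetical, not part of this file): a filesystem
 * that groups its dirty data, such as by snapshot context, might call the
 * above as:
 *
 *	ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct netfs_group *group = example_current_group(iocb->ki_filp);
 *
 *		...take the inode lock, do generic_write_checks(), etc...
 *		return netfs_perform_write(iocb, from, group);
 *	}
 *
 * example_write_iter() and example_current_group() are made-up names; passing
 * NULL for the group is also fine, as netfs_file_write_iter() below does for
 * filesystems that don't use grouping.
 */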
417 * netfs_buffered_write_iter_locked - write data to a file
418 * @iocb: IO state structure (file, offset, etc.)
419 * @from: iov_iter with data to write
420 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
422 * This function does all the work needed for actually writing data to a
423 * file. It does all basic checks, removes SUID from the file, updates
424 * modification times and then calls netfs_perform_write() to copy the data
425 * into the pagecache.
427 * The caller must hold appropriate locks around this function and have called
428 * generic_write_checks() already. The caller is also responsible for doing
429 * any necessary syncing afterwards.
431 * This function does *not* take care of syncing data in case of O_SYNC write.
432 * A caller has to handle it. This is mainly due to the fact that we want to
433 * avoid syncing under i_rwsem.
436 * * number of bytes written, even for truncated writes
437 * * negative error code if no data has been written at all
439 ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
440 struct netfs_group *netfs_group)
442 struct file *file = iocb->ki_filp;
445 trace_netfs_write_iter(iocb, from);
447 ret = file_remove_privs(file);
451 ret = file_update_time(file);
455 return netfs_perform_write(iocb, from, netfs_group);
457 EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
460 * netfs_file_write_iter - write data to a file
461 * @iocb: IO state structure
462 * @from: iov_iter with data to write
464 * Perform a write to a file, writing into the pagecache if possible and doing
465 * an unbuffered write instead if not.
468 * * Negative error code if no data has been written at all or
469 * vfs_fsync_range() failed for a synchronous write
470 * * Number of bytes written, even for truncated writes
472 ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
474 struct file *file = iocb->ki_filp;
475 struct inode *inode = file->f_mapping->host;
476 struct netfs_inode *ictx = netfs_inode(inode);
479 _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
481 if (!iov_iter_count(from))
484 if ((iocb->ki_flags & IOCB_DIRECT) ||
485 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
486 return netfs_unbuffered_write_iter(iocb, from);
488 ret = netfs_start_io_write(inode);
492 ret = generic_write_checks(iocb, from);
494 ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
495 netfs_end_io_write(inode);
497 ret = generic_write_sync(iocb, ret);
500 EXPORT_SYMBOL(netfs_file_write_iter);
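/*
 * Illustrative sketch (hypothetical, not from this file): netfs_file_write_iter()
 * has the same signature as the ->write_iter method, so a network filesystem
 * can plug it straight into its file_operations:
 *
 *	const struct file_operations example_file_ops = {
 *		...
 *		.write_iter	= netfs_file_write_iter,
 *		...
 *	};
 */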
503 * Notification that a previously read-only page is about to become writable.
504 * Note that the caller indicates a single page of a multipage folio.
506 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
508 struct folio *folio = page_folio(vmf->page);
509 struct file *file = vmf->vma->vm_file;
510 struct inode *inode = file_inode(file);
511 vm_fault_t ret = VM_FAULT_RETRY;
514 _enter("%lx", folio->index);
516 sb_start_pagefault(inode->i_sb);
518 if (folio_wait_writeback_killable(folio))
521 if (folio_lock_killable(folio) < 0)
524 /* Can we see a streaming write here? */
525 if (WARN_ON(!folio_test_uptodate(folio))) {
526 ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
530 if (netfs_folio_group(folio) != netfs_group) {
532 err = filemap_fdatawait_range(inode->i_mapping,
534 folio_pos(folio) + folio_size(folio));
537 ret = VM_FAULT_RETRY;
543 ret = VM_FAULT_SIGBUS;
548 if (folio_test_dirty(folio))
549 trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
551 trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
552 netfs_set_group(folio, netfs_group);
553 file_update_time(file);
554 ret = VM_FAULT_LOCKED;
556 sb_end_pagefault(inode->i_sb);
559 EXPORT_SYMBOL(netfs_page_mkwrite);
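/*
 * Illustrative sketch (hypothetical, not from this file): because the helper
 * above takes an extra netfs_group argument, a filesystem typically wraps it
 * for use as ->page_mkwrite in its vm_operations_struct:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return netfs_page_mkwrite(vmf, NULL);
 *	}
 *
 * substituting its own dirty-grouping cookie for NULL if it uses groups.
 */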
562 * Kill all the pages in the given range
564 static void netfs_kill_pages(struct address_space *mapping,
565 loff_t start, loff_t len)
568 pgoff_t index = start / PAGE_SIZE;
569 pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
571 _enter("%llx-%llx", start, start + len - 1);
574 _debug("kill %lx (to %lx)", index, last);
576 folio = filemap_get_folio(mapping, index);
582 next = folio_next_index(folio);
584 trace_netfs_folio(folio, netfs_folio_trace_kill);
585 folio_clear_uptodate(folio);
586 if (folio_test_fscache(folio))
587 folio_end_fscache(folio);
588 folio_end_writeback(folio);
590 generic_error_remove_folio(mapping, folio);
594 } while (index = next, index <= last);
600 * Redirty all the pages in a given range.
602 static void netfs_redirty_pages(struct address_space *mapping,
603 loff_t start, loff_t len)
606 pgoff_t index = start / PAGE_SIZE;
607 pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
609 _enter("%llx-%llx", start, start + len - 1);
612 _debug("redirty %llx @%llx", len, start);
614 folio = filemap_get_folio(mapping, index);
620 next = folio_next_index(folio);
621 trace_netfs_folio(folio, netfs_folio_trace_redirty);
622 filemap_dirty_folio(mapping, folio);
623 if (folio_test_fscache(folio))
624 folio_end_fscache(folio);
625 folio_end_writeback(folio);
627 } while (index = next, index <= last);
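/* Let the dirty-page throttling account for the folios we've just made
 * dirty again.
 */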
629 balance_dirty_pages_ratelimited(mapping);
635 * Completion of write to server
637 static void netfs_pages_written_back(struct netfs_io_request *wreq)
639 struct address_space *mapping = wreq->mapping;
640 struct netfs_folio *finfo;
641 struct netfs_group *group = NULL;
646 XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);
648 _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
652 last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
653 xas_for_each(&xas, folio, last) {
654 WARN(!folio_test_writeback(folio),
655 "bad %zx @%llx page %lx %lx\n",
656 wreq->len, wreq->start, folio->index, last);
658 if ((finfo = netfs_folio_info(folio))) {
659 /* Streaming writes cannot be redirtied whilst under
660 * writeback, so discard the streaming record.
662 folio_detach_private(folio);
663 group = finfo->netfs_group;
665 trace_netfs_folio(folio, netfs_folio_trace_clear_s);
667 } else if ((group = netfs_folio_group(folio))) {
668 /* Need to detach the group pointer if the page didn't
669 * get redirtied. If it has been redirtied, then it
670 * must be within the same group.
672 if (folio_test_dirty(folio)) {
673 trace_netfs_folio(folio, netfs_folio_trace_redirtied);
676 if (folio_trylock(folio)) {
677 if (!folio_test_dirty(folio)) {
678 folio_detach_private(folio);
680 trace_netfs_folio(folio, netfs_folio_trace_clear_g);
682 trace_netfs_folio(folio, netfs_folio_trace_redirtied);
691 if (!folio_test_dirty(folio)) {
692 folio_detach_private(folio);
694 trace_netfs_folio(folio, netfs_folio_trace_clear_g);
696 trace_netfs_folio(folio, netfs_folio_trace_redirtied);
701 trace_netfs_folio(folio, netfs_folio_trace_clear);
704 if (folio_test_fscache(folio))
705 folio_end_fscache(folio);
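/* Advance the xarray cursor past any remaining pages of this folio so that
 * the walk resumes at the next folio.
 */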
706 xas_advance(&xas, folio_next_index(folio) - 1);
707 folio_end_writeback(folio);
711 netfs_put_group_many(group, gcount);
716 * Deal with the disposition of the folios that are under writeback to close
717 * out the operation.
718 */
719 static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
721 struct address_space *mapping = wreq->mapping;
725 switch (wreq->error) {
727 netfs_pages_written_back(wreq);
731 pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
742 netfs_redirty_pages(mapping, wreq->start, wreq->len);
752 netfs_kill_pages(mapping, wreq->start, wreq->len);
757 mapping_set_error(mapping, wreq->error);
758 if (wreq->netfs_ops->done)
759 wreq->netfs_ops->done(wreq);
763 * Extend the region to be written back to include subsequent contiguously
764 * dirty pages if possible, but don't sleep while doing so.
766 * If this page holds new content, then we can include filler zeros in the
767 * writeback.
768 */
769 static void netfs_extend_writeback(struct address_space *mapping,
770 struct netfs_group *group,
771 struct xa_state *xas,
779 struct netfs_folio *finfo;
780 struct folio_batch fbatch;
783 pgoff_t index = (start + *_len) / PAGE_SIZE;
788 folio_batch_init(&fbatch);
791 /* Firstly, we gather up a batch of contiguous dirty pages
792 * under the RCU read lock - but we can't clear the dirty flags
793 * there if any of those pages are mapped.
797 xas_for_each(xas, folio, ULONG_MAX) {
799 if (xas_retry(xas, folio))
801 if (xa_is_value(folio))
803 if (folio->index != index) {
808 if (!folio_try_get_rcu(folio)) {
813 /* Has the folio moved or been split? */
814 if (unlikely(folio != xas_reload(xas))) {
820 if (!folio_trylock(folio)) {
825 if (!folio_test_dirty(folio) ||
826 folio_test_writeback(folio) ||
827 folio_test_fscache(folio)) {
835 len = folio_size(folio);
836 priv = folio_get_private(folio);
837 if ((const struct netfs_group *)priv != group) {
839 finfo = netfs_folio_info(folio);
840 if (finfo->netfs_group != group ||
841 finfo->dirty_offset > 0) {
847 len = finfo->dirty_len;
850 *_top += folio_size(folio);
851 index += folio_nr_pages(folio);
852 *_count -= folio_nr_pages(folio);
854 if (*_len >= max_len || *_count <= 0)
857 if (!folio_batch_add(&fbatch, folio))
866 /* Now, if we obtained any folios, we can shift them to being under
867 * writeback and mark them for caching.
868 */
869 if (!folio_batch_count(&fbatch))
872 for (i = 0; i < folio_batch_count(&fbatch); i++) {
873 folio = fbatch.folios[i];
874 trace_netfs_folio(folio, netfs_folio_trace_store_plus);
876 if (!folio_clear_dirty_for_io(folio))
878 folio_start_writeback(folio);
879 netfs_folio_start_fscache(caching, folio);
883 folio_batch_release(&fbatch);
889 * Synchronously write back the locked page and any subsequent non-locked dirty
890 * pages.
891 */
892 static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
893 struct writeback_control *wbc,
894 struct netfs_group *group,
895 struct xa_state *xas,
897 unsigned long long start,
898 unsigned long long end)
900 struct netfs_io_request *wreq;
901 struct netfs_folio *finfo;
902 struct netfs_inode *ctx = netfs_inode(mapping->host);
903 unsigned long long i_size = i_size_read(&ctx->inode);
905 bool caching = netfs_is_cache_enabled(ctx);
906 long count = wbc->nr_to_write;
909 _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
911 wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
912 NETFS_WRITEBACK);
915 return PTR_ERR(wreq);
918 if (!folio_clear_dirty_for_io(folio))
920 folio_start_writeback(folio);
921 netfs_folio_start_fscache(caching, folio);
923 count -= folio_nr_pages(folio);
925 /* Find all consecutive lockable dirty pages that have contiguous
926 * written regions, stopping when we find a page that is not
927 * immediately lockable, is not dirty or is missing, or we reach the end of
928 * the range.
929 */
930 trace_netfs_folio(folio, netfs_folio_trace_store);
933 finfo = netfs_folio_info(folio);
935 start += finfo->dirty_offset;
936 if (finfo->dirty_offset + finfo->dirty_len != len) {
937 len = finfo->dirty_len;
940 len = finfo->dirty_len;
943 if (start < i_size) {
944 /* Trim the write to the EOF; the extra data is ignored. Also
945 * put an upper limit on the size of a single storedata op.
947 max_len = 65536 * 4096; /* i.e. cap each store op at 256MiB */
948 max_len = min_t(unsigned long long, max_len, end - start + 1);
949 max_len = min_t(unsigned long long, max_len, i_size - start);
952 netfs_extend_writeback(mapping, group, xas, &count, start,
953 max_len, caching, &len, &wreq->upper_len);
957 len = min_t(unsigned long long, len, i_size - start);
959 /* We now have a contiguous set of dirty pages, each with writeback
960 * set; the first page is still locked at this point, but all the rest
961 * have been unlocked.
967 if (start < i_size) {
968 _debug("write back %zx @%llx [%llx]", len, start, i_size);
970 /* Speculatively write to the cache. We have to fix this up
971 * later if the store fails.
973 wreq->cleanup = netfs_cleanup_buffered_write;
975 iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
976 wreq->upper_len);
977 __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
978 ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
979 if (ret == 0 || ret == -EIOCBQUEUED)
980 wbc->nr_to_write -= len / PAGE_SIZE;
982 _debug("write discard %zx @%llx [%llx]", len, start, i_size);
984 /* The dirty region was entirely beyond the EOF. */
985 fscache_clear_page_bits(mapping, start, len, caching);
986 netfs_pages_written_back(wreq);
990 netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
996 * Write a region of pages back to the server
998 static ssize_t netfs_writepages_begin(struct address_space *mapping,
999 struct writeback_control *wbc,
1000 struct netfs_group *group,
1001 struct xa_state *xas,
1002 unsigned long long *_start,
1003 unsigned long long end)
1005 const struct netfs_folio *finfo;
1006 struct folio *folio;
1007 unsigned long long start = *_start;
1012 _enter("%llx,%llx,", start, end);
1015 /* Find the first dirty page in the group. */
1019 folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
1020 if (xas_retry(xas, folio) || xa_is_value(folio))
1025 if (!folio_try_get_rcu(folio)) {
1030 if (unlikely(folio != xas_reload(xas))) {
1036 /* Skip any dirty folio that's not in the group of interest. */
1037 priv = folio_get_private(folio);
1038 if ((const struct netfs_group *)priv != group) {
1039 finfo = netfs_folio_info(folio);
1040 if (finfo->netfs_group != group) {
1053 start = folio_pos(folio); /* May regress with THPs */
1055 _debug("wback %lx", folio->index);
1057 /* At this point we hold neither the i_pages lock nor the page lock:
1058 * the page may be truncated or invalidated (changing page->mapping to
1059 * NULL), or even swizzled back from swapper_space to tmpfs file
1060 * mapping.
1061 */
1063 if (wbc->sync_mode != WB_SYNC_NONE) {
1064 ret = folio_lock_killable(folio);
1068 if (!folio_trylock(folio))
1072 if (folio->mapping != mapping ||
1073 !folio_test_dirty(folio)) {
1074 start += folio_size(folio);
1075 folio_unlock(folio);
1079 if (folio_test_writeback(folio) ||
1080 folio_test_fscache(folio)) {
1081 folio_unlock(folio);
1082 if (wbc->sync_mode != WB_SYNC_NONE) {
1083 folio_wait_writeback(folio);
1084 #ifdef CONFIG_FSCACHE
1085 folio_wait_fscache(folio);
1090 start += folio_size(folio);
1091 if (wbc->sync_mode == WB_SYNC_NONE) {
1092 if (skips >= 5 || need_resched()) {
1101 ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
1105 *_start = start + ret;
1106 _leave(" = %zd [%llx]", ret, *_start);
1111 * Write a region of pages back to the server
1113 static int netfs_writepages_region(struct address_space *mapping,
1114 struct writeback_control *wbc,
1115 struct netfs_group *group,
1116 unsigned long long *_start,
1117 unsigned long long end)
1121 XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
1124 ret = netfs_writepages_begin(mapping, wbc, group, &xas,
1125 _start, end);
1126 if (ret > 0 && wbc->nr_to_write > 0)
1127 cond_resched();
1128 } while (ret > 0 && wbc->nr_to_write > 0);
1130 return ret > 0 ? 0 : ret;
1134 * write some of the pending data back to the server
1136 int netfs_writepages(struct address_space *mapping,
1137 struct writeback_control *wbc)
1139 struct netfs_group *group = NULL;
1145 /* We have to be careful as we can end up racing with setattr()
1146 * truncating the pagecache since the caller doesn't take a lock here to
1147 * prevent it.
1148 */
1150 if (wbc->range_cyclic && mapping->writeback_index) {
1151 start = mapping->writeback_index * PAGE_SIZE;
1152 ret = netfs_writepages_region(mapping, wbc, group,
1157 if (wbc->nr_to_write <= 0) {
1158 mapping->writeback_index = start / PAGE_SIZE;
1163 end = mapping->writeback_index * PAGE_SIZE;
1164 mapping->writeback_index = 0;
1165 ret = netfs_writepages_region(mapping, wbc, group, &start, end);
1167 mapping->writeback_index = start / PAGE_SIZE;
1168 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
1170 ret = netfs_writepages_region(mapping, wbc, group,
1172 if (wbc->nr_to_write > 0 && ret == 0)
1173 mapping->writeback_index = start / PAGE_SIZE;
1175 start = wbc->range_start;
1176 ret = netfs_writepages_region(mapping, wbc, group,
1177 &start, wbc->range_end);
1181 _leave(" = %d", ret);
1184 EXPORT_SYMBOL(netfs_writepages);
1187 * Deal with the disposition of a laundered folio.
1189 static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
1192 pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
1193 mapping_set_error(wreq->mapping, wreq->error);
1198 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
1199 * @folio: The folio to clean
1201 * This is called to write back a folio that's being invalidated when an inode
1202 * is getting torn down. Ideally, writepages would be used instead.
1204 int netfs_launder_folio(struct folio *folio)
1206 struct netfs_io_request *wreq;
1207 struct address_space *mapping = folio->mapping;
1208 struct netfs_folio *finfo = netfs_folio_info(folio);
1209 struct netfs_group *group = netfs_folio_group(folio);
1210 struct bio_vec bvec;
1211 unsigned long long i_size = i_size_read(mapping->host);
1212 unsigned long long start = folio_pos(folio);
1213 size_t offset = 0, len;
1217 offset = finfo->dirty_offset;
1219 len = finfo->dirty_len;
1221 len = folio_size(folio);
1223 len = min_t(unsigned long long, len, i_size - start);
1225 wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
1227 ret = PTR_ERR(wreq);
1231 if (!folio_clear_dirty_for_io(folio))
1234 trace_netfs_folio(folio, netfs_folio_trace_launder);
1236 _debug("launder %llx-%llx", start, start + len - 1);
1238 /* Speculatively write to the cache. We have to fix this up later if
1239 * the store fails.
1240 */
1241 wreq->cleanup = netfs_cleanup_launder_folio;
1243 bvec_set_folio(&bvec, folio, len, offset);
1244 iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
1245 __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
1246 ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
1249 folio_detach_private(folio);
1250 netfs_put_group(group);
1252 netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
1254 folio_wait_fscache(folio);
1255 _leave(" = %d", ret);
1258 EXPORT_SYMBOL(netfs_launder_folio);
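/*
 * Illustrative sketch (hypothetical, not from this file): the writeback entry
 * points above are designed to be wired into a filesystem's
 * address_space_operations, for example:
 *
 *	const struct address_space_operations example_aops = {
 *		...
 *		.writepages	= netfs_writepages,
 *		.launder_folio	= netfs_launder_folio,
 *		...
 *	};
 */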