// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}
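
/*
 * Note: wpc.ctx is what gets handed to the generic iomap writeback code,
 * which only sees the embedded iomap_writepage_ctx.  XFS_WPC() recovers the
 * containing XFS context via container_of(), so the fork sequence numbers
 * above ride along with every ->map_blocks callback at no extra allocation
 * cost.
 */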

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	xfs_trans_clear_context(tp);
	return 0;
}
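
/*
 * Why allocate the transaction at submission time: reserving tr_fsyncts log
 * space before the bio is issued means the size update at completion does
 * not need to reserve log space in the I/O completion path.  The hand-off
 * dance above (releasing freeze protection and the transaction context)
 * exists because the thread that eventually commits the transaction is
 * usually not the thread that allocated it.
 */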

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
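
/*
 * The in-core i_size was already updated in the buffered write path; only
 * the on-disk di_size is deferred to I/O completion here.  xfs_new_eof()
 * returns 0 when the on-disk size already covers this range (e.g. another
 * ioend got there first), in which case the transaction is simply cancelled.
 */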

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	xfs_trans_set_context(tp);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
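
/*
 * This mirrors the release side in xfs_setfilesize_trans_alloc(): the
 * submitting thread told lockdep it dropped freeze protection and cleared
 * its transaction context, so the completing thread must re-acquire both
 * before it can cancel or commit the transaction.
 */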

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error.  The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty.  If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip,
					XFS_B_TO_FSBT(mp, offset),
					XFS_B_TO_FSB(mp, size));
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}
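
/*
 * Completion thus has three success cases: shared (COW) extents are remapped
 * into the data fork, unwritten extents are converted to written, and plain
 * overwrites need no fork update at all; the ASSERT above checks that an
 * appending overwrite ioend carried a preallocated size-update transaction.
 */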

/*
 * If the ioend being merged has a preallocated transaction for file
 * size updates, we need to ensure the ioend it is merged into also
 * has one.  If it already has one, we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}
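
/*
 * Passing -ECANCELED here is only a means of cancelling next's now-redundant
 * transaction through the common helper; the error is not propagated to the
 * merged ioend.
 */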

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}
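
/*
 * The ioends are sorted by file offset and merged before completion so that
 * adjacent ranges can share a single unwritten extent conversion or size
 * update instead of paying a transaction per bio.
 */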

static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}
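
/*
 * All three cases above need transactions or blocking locks and hence
 * process context; only plain, non-appending overwrites can complete
 * directly from the (possibly atomic) bio completion without bouncing to
 * a workqueue.
 */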

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
				&ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}
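
/*
 * The work item is only queued when the per-inode list goes from empty to
 * non-empty; later bios completing before the worker runs just append to
 * the list and are drained in the same xfs_end_io() pass.
 */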

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}
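
/*
 * The sequence counters are the cheap substitute for re-taking the ILOCK
 * and re-walking the extent tree for every block: if_seq is bumped on every
 * fork modification, so a matching snapshot proves the cached mapping has
 * not been punched, moved, or overlaid by a COW reservation since it was
 * looked up.
 */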

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}
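
/*
 * Loop termination: each xfs_bmapi_convert_delalloc() call allocates real
 * blocks from the start of the delalloc extent forward, so every iteration
 * should move wpc->iomap.offset + length closer to, and eventually past,
 * offset.
 */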

static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}
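
/*
 * In short, the ->map_blocks resolution order per block is: (1) reuse the
 * cached mapping if the seqno check passes, (2) prefer a covering COW fork
 * extent, (3) fall back to the data fork, faking a hole mapping past EOF,
 * and only hit the allocator (xfs_convert_blocks) when the winning extent
 * is still delalloc.
 */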

static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}
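
/*
 * Note the IOMAP_UNWRITTEN exclusion above: the unwritten extent conversion
 * transaction run at completion time updates di_size itself, so appending
 * unwritten ioends need no extra size-update reservation here.
 */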

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page,
	loff_t			fileoff)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		pageoff = offset_in_page(fileoff);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, fileoff);
	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, fileoff);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			i_blocks_per_page(inode, page) - pageoff_fsb);
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}
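
/*
 * The punch length counts whole filesystem blocks from the failing offset
 * to the end of the page (i_blocks_per_page() blocks total), so on sub-page
 * block size filesystems only the tail of the page is punched, never blocks
 * preceding fileoff.
 */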

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	if (WARN_ON_ONCE(current->journal_info)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions.  This is bad, so issue a warning and get out of here.
	 */
	if (WARN_ON_ONCE(current->journal_info))
		return 0;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
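
/*
 * current->journal_info doubles as the "am I inside a transaction" marker
 * (see xfs_trans_set_context()), which is what both writeback entry points
 * check above before doing anything that could log a nested transaction.
 */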

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}
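
/*
 * Swap no longer has to go through ->bmap on XFS: the ->swap_activate
 * methods below map the swapfile through iomap_swapfile_activate() instead,
 * so this guard mainly defends against legacy FIBMAP users.
 */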

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control	*rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readahead		= xfs_vm_readahead,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};