// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

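/*
 * The public flags (IOMAP_DIO_UNWRITTEN, IOMAP_DIO_COW) live in the low
 * bits and are defined in include/linux/iomap.h, which is why the private
 * flags above start at bit 28.
 */
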
struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct bio		*poll_bio;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

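/*
 * Example (a sketch, not part of this file): a filesystem that wants its
 * direct I/O bios allocated from a private pool can point ->bio_set in its
 * iomap_dio_ops at one; the "foo" names below are hypothetical:
 *
 *	static struct bio_set foo_bio_set;	// set up with bioset_init()
 *
 *	static const struct iomap_dio_ops foo_dio_ops = {
 *		.bio_set	= &foo_bio_set,
 *	};
 */
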
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
		bio_set_polled(bio, dio->iocb);
		dio->submit.poll_bio = bio;
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	if (ret > 0)
		ret += dio->done_before;

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

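/*
 * Sketch of an ->end_io hook as used above (hypothetical "foo" filesystem;
 * foo_convert_unwritten() stands in for the filesystem's extent conversion):
 *
 *	static int foo_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned int flags)
 *	{
 *		if (error)
 *			return error;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return foo_convert_unwritten(iocb, size);
 *		return 0;
 *	}
 *
 * Note that it runs before the page cache invalidation above, so unwritten
 * extents are converted before a racing buffered read can cache stale zeros.
 */
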
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			WRITE_ONCE(dio->iocb->private, NULL);
			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			WRITE_ONCE(dio->iocb->private, NULL);
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

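/*
 * This is exported for filesystems that take over bio submission via
 * ->submit_io: if such a filesystem wraps or clones the bio, its own
 * completion handler must eventually call iomap_dio_bio_end_io() so the
 * dio reference count above is dropped.  A rough sketch (hypothetical
 * foo_account_io(); the wrapping details are elided):
 *
 *	static void foo_bio_end_io(struct bio *bio)
 *	{
 *		foo_account_io(bio);
 *		iomap_dio_bio_end_io(bio);
 *	}
 */
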
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

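/*
 * To summarise: reads become REQ_OP_READ; writes become REQ_OP_WRITE (or
 * REQ_OP_ZONE_APPEND on zoned devices) with REQ_SYNC | REQ_IDLE, plus
 * REQ_FUA when the caller qualified for it.  Clearing IOMAP_DIO_WRITE_FUA
 * for any non-FUA write leaves IOMAP_DIO_NEED_SYNC set, so the completion
 * path falls back to a cache flush via generic_write_sync().
 */
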
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & ((1 << blkbits) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

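/*
 * Worked example of the zeroing above, assuming a 4096-byte filesystem
 * block and a 512-byte write at file offset 5120 into a freshly allocated
 * (IOMAP_F_NEW) block: the head zeroing writes 1024 bytes at offset 4096
 * (pad = 5120 & 4095), the data lands at 5120, and the tail zeroing writes
 * 2560 bytes at offset 5632, so the whole block is initialised on disk.
 */
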
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

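/*
 * Note that direct reads from holes (and from unwritten extents, see
 * iomap_dio_iter() below) never touch the device: the user buffer is
 * simply zero-filled via iov_iter_zero() above.
 */
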
static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.
 * The caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	loff_t end = iomi.pos + iomi.len - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.poll_bio = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iomi.pos,
					end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iomi.pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb) && !(dio_flags & IOMAP_DIO_NOSYNC)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using FUA for this IO.  Any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_FUA;
		}
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (iomi.pos >= dio->i_size ||
		    iomi.pos + iomi.len > dio->i_size)
			goto out_free_dio;
		iomi.flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping,
				iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, iomi.pos,
							iomi.len);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->private, dio->submit.poll_bio);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

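/*
 * A sketch of the IOMAP_DIO_PARTIAL protocol described above, for a write
 * whose source buffer may not be resident ("foo_iomap_ops" and the error
 * handling are placeholders; gfs2 and btrfs implement refined versions of
 * this loop):
 *
 *	size_t done = 0;
 *	ssize_t ret;
 *
 *	do {
 *		ret = iomap_dio_rw(iocb, from, &foo_iomap_ops, NULL,
 *				   IOMAP_DIO_PARTIAL, NULL, done);
 *		if (ret > 0)
 *			done = ret;	// running total, @done folded in
 *		else if (ret != -EFAULT)
 *			break;		// hard error, give up
 *		if (!iov_iter_count(from))
 *			break;		// everything transferred
 *	} while (!fault_in_iov_iter_readable(from, iov_iter_count(from)));
 */
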
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
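
/*
 * Typical use from a filesystem's ->read_iter, sketched with a hypothetical
 * "foo" filesystem (locking and O_DIRECT checks omitted):
 *
 *	static ssize_t foo_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		return iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL, 0, NULL, 0);
 *	}
 *
 * For writes, a return of -ENOTBLK tells the caller to redo the I/O through
 * the page cache.
 */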