// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)
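
/*
 * For reference, the public flags the comment above refers to live in
 * include/linux/iomap.h; at the time of writing they are
 * IOMAP_DIO_UNWRITTEN (1 << 0) and IOMAP_DIO_COW (1 << 1), which is why the
 * private flags above occupy the high bits.
 */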
struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	bool			wait_for_completion;
	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;
		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
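
/*
 * Illustrative sketch (not part of this file): a filesystem that supports
 * polled direct I/O would typically wire this helper straight into its
 * file_operations, roughly like the hypothetical example below:
 *
 *	static const struct file_operations example_file_operations = {
 *		.read_iter	= example_file_read_iter,
 *		.write_iter	= example_file_write_iter,
 *		.iopoll		= iomap_dio_iopoll,
 *	};
 *
 * The "example_*" names are placeholders; real iomap users such as xfs set
 * .iopoll in the same way.
 */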
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev);
	if (dio->dops && dio->dops->submit_io)
		dio->submit.cookie = dio->dops->submit_io(iter, bio, pos);
	else
		dio->submit.cookie = submit_bio(bio);
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}
	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}
	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	if (ret > 0)
		ret += dio->done_before;
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
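
/*
 * Illustrative sketch (not part of this file): the optional ->end_io() hook
 * called above lets a filesystem finish per-write work, such as converting
 * unwritten extents, before the page cache is invalidated.  The names below
 * are hypothetical placeholders:
 *
 *	static int example_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned int flags)
 *	{
 *		if (error)
 *			return error;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return example_convert_unwritten(iocb, size);
 *		return 0;
 *	}
 *
 *	static const struct iomap_dio_ops example_dio_ops = {
 *		.end_io		= example_dio_end_io,
 *	};
 */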
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}
/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iter->iomap.bdev);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}
/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline unsigned int iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	unsigned int opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}
	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;
	return opflags;
}
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	unsigned int bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;
	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}
	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}
	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}
	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;
		bio->bi_opf = bio_opf;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}
		dio->size += n;
		copied += n;
		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);
	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}
static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}
static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.  (An illustrative caller-side
 * retry sketch follows __iomap_dio_rw() below.)
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, size_t done_before)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
	};
	loff_t end = iomi.pos + iomi.len - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!iomi.len)
		return NULL;
	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;
	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iomi.pos,
					end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}
		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iomi.pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO. Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}
	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (iomi.pos >= dio->i_size ||
		    iomi.pos + iomi.len > dio->i_size)
			goto out_free_dio;
		iomi.flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping,
				iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, iomi.pos,
							iomi.len);
			ret = -ENOTBLK;
			goto out_free_dio;
		}
		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);
	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = iomap_dio_iter(&iomi, dio);
	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);
	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);
	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);
	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;
			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);
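
/*
 * Illustrative sketch (not part of this file, referenced from the comment
 * above __iomap_dio_rw): with IOMAP_DIO_PARTIAL a caller can issue the
 * transfer with page faults disabled, fault the missing pages in by hand,
 * and resume, passing the bytes already transferred as @done_before.  The
 * pattern below loosely follows what gfs2 does; all "example_*" names are
 * hypothetical placeholders:
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t written = 0;
 *		ssize_t ret;
 *
 *	retry:
 *		pagefault_disable();
 *		to->nofault = true;
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL,
 *				   IOMAP_DIO_PARTIAL, written);
 *		to->nofault = false;
 *		pagefault_enable();
 *		if (ret > 0)
 *			written = ret;
 *
 *		if ((ret == -EFAULT || (ret > 0 && iov_iter_count(to))) &&
 *		    fault_in_iov_iter_writeable(to, PAGE_SIZE) != PAGE_SIZE)
 *			goto retry;
 *		return ret;
 *	}
 */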
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
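
/*
 * Illustrative sketch (not part of this file): the common, non-partial use of
 * this interface is a thin wrapper in a filesystem's ->read_iter() and
 * ->write_iter() methods, with locking, O_DIRECT checks, and the buffered
 * fallback omitted.  The "example_*" names are hypothetical placeholders:
 *
 *	static ssize_t example_file_dio_write(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		return iomap_dio_rw(iocb, from, &example_iomap_ops,
 *				    &example_dio_ops, 0, 0);
 *	}
 *
 * A short or -ENOTBLK return tells such a caller to fall back to buffered
 * I/O, as described in the comment above __iomap_dio_rw().
 */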