// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>

#include "../internal.h"
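
/*
 * Added commentary: buffered read/write support implemented on top of the
 * iomap_ops abstraction.  Filesystems supply ->iomap_begin/->iomap_end to
 * map file offsets to extents; the helpers below do the page cache work.
 * For filesystems with a block size smaller than PAGE_SIZE, a struct
 * iomap_page (declared in include/linux/iomap.h) is attached to
 * page->private to track per-block uptodate state and the number of
 * outstanding read and write requests against the page.
 */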

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	spin_lock_init(&iop->uptodate_lock);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	bool uptodate = true;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
		if (i >= first && i <= last)
			set_bit(i, iop->uptodate);
		else if (!test_bit(i, iop->uptodate))
			uptodate = false;
	}

	if (uptodate)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}
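
/*
 * Added commentary: iomap_readpage_actor() fills one iomap extent's worth of
 * a locked page.  Already-uptodate or post-EOF sub-page ranges are zeroed or
 * skipped via iomap_adjust_read_range(); everything else is added to a read
 * bio, merging with the previous bio segment when the extent is contiguous
 * on disk.  iop->read_count tracks the outstanding bio segments so the page
 * is only unlocked once the last one completes.
 */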

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
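
/*
 * Added usage sketch (not part of the original file): a filesystem wires
 * iomap_readpage() up from its ->readpage address_space operation, along the
 * lines of the hypothetical example below, where foo_iomap_ops supplies the
 * filesystem's ->iomap_begin/->iomap_end callbacks:
 *
 *	static int foo_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &foo_iomap_ops);
 *	}
 */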

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}
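
/*
 * Added usage sketch (not part of the original file): iomap_readpages() is
 * meant to be called from a filesystem's ->readpages address_space
 * operation, e.g. in a hypothetical filesystem:
 *
 *	static int foo_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages, &foo_iomap_ops);
 *	}
 */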

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
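
/*
 * Added usage note (not part of the original file): iomap_releasepage(),
 * iomap_invalidatepage(), iomap_is_partially_uptodate() and (optionally)
 * iomap_migrate_page() are intended to be plugged straight into a
 * filesystem's address_space_operations, e.g. for a hypothetical foo_aops:
 *
 *	const struct address_space_operations foo_aops = {
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.migratepage		= iomap_migrate_page,
 *	};
 */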

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from
	 * it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
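
/*
 * Added commentary: the helpers below implement the buffered write path.
 * iomap_write_begin() locks and prepares a page (reading in any blocks that
 * the write only partially covers), the caller copies data into it, and
 * iomap_write_end() marks the written range uptodate and dirty and updates
 * i_size if the write extended the file.
 */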

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
				pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}
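
/*
 * Added commentary: iomap_write_begin() prepares a page for a write at
 * @pos/@len.  Depending on the mapping type this either copies in inline
 * data, falls back to the buffer_head based path, or uses
 * __iomap_write_begin() above.  On success the page is returned locked in
 * *pagep; on failure any page cache beyond EOF that was only created for
 * this write is truncated again.
 */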

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
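
/*
 * Added commentary: the write actor loops over the extent returned by
 * ->iomap_begin one page at a time: fault in the source user memory, call
 * iomap_write_begin(), copy with the atomic user-copy helper, then call
 * iomap_write_end().  A zero-length copy falls back to a single-segment
 * sized attempt to avoid livelocking on an iovec that cannot be faulted in
 * all at once.
 */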

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
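
/*
 * Added usage sketch (not part of the original file): a filesystem calls
 * iomap_file_buffered_write() from its ->write_iter method with the inode
 * locked, e.g. in a hypothetical filesystem:
 *
 *	inode_lock(inode);
 *	ret = iomap_file_buffered_write(iocb, from, &foo_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0) {
 *		iocb->ki_pos += ret;
 *		ret = generic_write_sync(iocb, ret);
 *	}
 */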

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
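
/*
 * Added commentary: iomap_file_dirty() below walks an already-written file
 * range, reads each page in (via __iomap_read_page()) and pushes it back
 * through iomap_write_begin()/iomap_write_end() so that the whole range is
 * marked dirty again, e.g. to force reallocation of shared (reflinked)
 * blocks on the next writeback.
 */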

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
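
/*
 * Added usage sketch (not part of the original file): truncate paths
 * typically use iomap_truncate_page() to zero the tail of the last block
 * when shrinking a file, e.g. with a hypothetical foo_iomap_ops:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero, &foo_iomap_ops);
 */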

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}
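
/*
 * Added usage sketch (not part of the original file): iomap_page_mkwrite()
 * implements the write-fault path for filesystems using iomap and is called
 * from a ->page_mkwrite handler, e.g. in a hypothetical filesystem:
 *
 *	static vm_fault_t foo_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &foo_iomap_ops);
 *	}
 *
 * Filesystems typically wrap the call in sb_start_pagefault() and
 * sb_end_pagefault().
 */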

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	offset = page_offset(page);
	if (page->mapping != inode->i_mapping || offset > size) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (offset > size - PAGE_SIZE)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);