fs/iomap/buffered-io.c (GNU Linux-libre 5.4.257-gnu1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Red Hat, Inc.
4  * Copyright (c) 2016-2018 Christoph Hellwig.
5  */
6 #include <linux/module.h>
7 #include <linux/compiler.h>
8 #include <linux/fs.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/bio.h>
17 #include <linux/sched/signal.h>
18 #include <linux/migrate.h>
19
20 #include "../internal.h"
21
22 static struct iomap_page *
23 iomap_page_create(struct inode *inode, struct page *page)
24 {
25         struct iomap_page *iop = to_iomap_page(page);
26
27         if (iop || i_blocksize(inode) == PAGE_SIZE)
28                 return iop;
29
30         iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
31         atomic_set(&iop->read_count, 0);
32         atomic_set(&iop->write_count, 0);
33         spin_lock_init(&iop->uptodate_lock);
34         bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
35
36         /*
37          * migrate_page_move_mapping() assumes that pages with private data have
38          * their count elevated by 1.
39          */
40         get_page(page);
41         set_page_private(page, (unsigned long)iop);
42         SetPagePrivate(page);
43         return iop;
44 }
45
46 static void
47 iomap_page_release(struct page *page)
48 {
49         struct iomap_page *iop = to_iomap_page(page);
50
51         if (!iop)
52                 return;
53         WARN_ON_ONCE(atomic_read(&iop->read_count));
54         WARN_ON_ONCE(atomic_read(&iop->write_count));
55         ClearPagePrivate(page);
56         set_page_private(page, 0);
57         put_page(page);
58         kfree(iop);
59 }
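/*
 * Note (informative, not from the original source): the iomap_page above is
 * only allocated when the block size is smaller than the page size, and its
 * uptodate bitmap then tracks one bit per block.  For example, with 4k pages
 * and 1k blocks a page holds four blocks, so reads and writes can mark
 * blocks 0-3 uptodate individually; the bitmap itself is sized for the worst
 * case of 512-byte blocks (PAGE_SIZE / SECTOR_SIZE bits).
 */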
60
61 /*
62  * Calculate the range inside the page that we actually need to read.
63  */
64 static void
65 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
66                 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
67 {
68         loff_t orig_pos = *pos;
69         loff_t isize = i_size_read(inode);
70         unsigned block_bits = inode->i_blkbits;
71         unsigned block_size = (1 << block_bits);
72         unsigned poff = offset_in_page(*pos);
73         unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
74         unsigned first = poff >> block_bits;
75         unsigned last = (poff + plen - 1) >> block_bits;
76
77         /*
78          * If the block size is smaller than the page size we need to check the
79          * per-block uptodate status and adjust the offset and length if needed
80          * to avoid reading in already uptodate ranges.
81          */
82         if (iop) {
83                 unsigned int i;
84
85                 /* move forward for each leading block marked uptodate */
86                 for (i = first; i <= last; i++) {
87                         if (!test_bit(i, iop->uptodate))
88                                 break;
89                         *pos += block_size;
90                         poff += block_size;
91                         plen -= block_size;
92                         first++;
93                 }
94
95                 /* truncate len if we find any trailing uptodate block(s) */
96                 for ( ; i <= last; i++) {
97                         if (test_bit(i, iop->uptodate)) {
98                                 plen -= (last - i + 1) * block_size;
99                                 last = i - 1;
100                                 break;
101                         }
102                 }
103         }
104
105         /*
106          * If the extent spans the block that contains the i_size we need to
107          * handle both halves separately so that we properly zero data in the
108          * page cache for blocks that are entirely outside of i_size.
109          */
110         if (orig_pos <= isize && orig_pos + length > isize) {
111                 unsigned end = offset_in_page(isize - 1) >> block_bits;
112
113                 if (first <= end && last > end)
114                         plen -= (last - end) * block_size;
115         }
116
117         *offp = poff;
118         *lenp = plen;
119 }
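/*
 * Worked example (informative, not from the original source): with 4k pages
 * and 1k blocks, a request to read the whole page where the iomap_page
 * already has blocks 0 and 3 uptodate comes back trimmed to poff = 1024,
 * plen = 2048: the leading uptodate block advances *pos and poff, and the
 * trailing uptodate block shortens plen, so only blocks 1 and 2 are read.
 */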
120
121 static void
122 iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
123 {
124         struct iomap_page *iop = to_iomap_page(page);
125         struct inode *inode = page->mapping->host;
126         unsigned first = off >> inode->i_blkbits;
127         unsigned last = (off + len - 1) >> inode->i_blkbits;
128         bool uptodate = true;
129         unsigned long flags;
130         unsigned int i;
131
132         spin_lock_irqsave(&iop->uptodate_lock, flags);
133         for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
134                 if (i >= first && i <= last)
135                         set_bit(i, iop->uptodate);
136                 else if (!test_bit(i, iop->uptodate))
137                         uptodate = false;
138         }
139
140         if (uptodate)
141                 SetPageUptodate(page);
142         spin_unlock_irqrestore(&iop->uptodate_lock, flags);
143 }
144
145 static void
146 iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
147 {
148         if (PageError(page))
149                 return;
150
151         if (page_has_private(page))
152                 iomap_iop_set_range_uptodate(page, off, len);
153         else
154                 SetPageUptodate(page);
155 }
156
157 static void
158 iomap_read_finish(struct iomap_page *iop, struct page *page)
159 {
160         if (!iop || atomic_dec_and_test(&iop->read_count))
161                 unlock_page(page);
162 }
163
164 static void
165 iomap_read_page_end_io(struct bio_vec *bvec, int error)
166 {
167         struct page *page = bvec->bv_page;
168         struct iomap_page *iop = to_iomap_page(page);
169
170         if (unlikely(error)) {
171                 ClearPageUptodate(page);
172                 SetPageError(page);
173         } else {
174                 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
175         }
176
177         iomap_read_finish(iop, page);
178 }
179
180 static void
181 iomap_read_end_io(struct bio *bio)
182 {
183         int error = blk_status_to_errno(bio->bi_status);
184         struct bio_vec *bvec;
185         struct bvec_iter_all iter_all;
186
187         bio_for_each_segment_all(bvec, bio, iter_all)
188                 iomap_read_page_end_io(bvec, error);
189         bio_put(bio);
190 }
191
192 struct iomap_readpage_ctx {
193         struct page             *cur_page;
194         bool                    cur_page_in_bio;
195         bool                    is_readahead;
196         struct bio              *bio;
197         struct list_head        *pages;
198 };
199
200 static void
201 iomap_read_inline_data(struct inode *inode, struct page *page,
202                 struct iomap *iomap)
203 {
204         size_t size = i_size_read(inode);
205         void *addr;
206
207         if (PageUptodate(page))
208                 return;
209
210         BUG_ON(page->index);
211         BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
212
213         addr = kmap_atomic(page);
214         memcpy(addr, iomap->inline_data, size);
215         memset(addr + size, 0, PAGE_SIZE - size);
216         kunmap_atomic(addr);
217         SetPageUptodate(page);
218 }
219
220 static loff_t
221 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
222                 struct iomap *iomap)
223 {
224         struct iomap_readpage_ctx *ctx = data;
225         struct page *page = ctx->cur_page;
226         struct iomap_page *iop = iomap_page_create(inode, page);
227         bool same_page = false, is_contig = false;
228         loff_t orig_pos = pos;
229         unsigned poff, plen;
230         sector_t sector;
231
232         if (iomap->type == IOMAP_INLINE) {
233                 WARN_ON_ONCE(pos);
234                 iomap_read_inline_data(inode, page, iomap);
235                 return PAGE_SIZE;
236         }
237
238         /* zero post-eof blocks as the page may be mapped */
239         iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
240         if (plen == 0)
241                 goto done;
242
243         if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
244                 zero_user(page, poff, plen);
245                 iomap_set_range_uptodate(page, poff, plen);
246                 goto done;
247         }
248
249         ctx->cur_page_in_bio = true;
250
251         /*
252          * Try to merge into a previous segment if we can.
253          */
254         sector = iomap_sector(iomap, pos);
255         if (ctx->bio && bio_end_sector(ctx->bio) == sector)
256                 is_contig = true;
257
258         if (is_contig &&
259             __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
260                 if (!same_page && iop)
261                         atomic_inc(&iop->read_count);
262                 goto done;
263         }
264
265         /*
266          * If we start a new segment we need to increase the read count, and we
267          * need to do so before submitting any previous full bio to make sure
268          * that we don't prematurely unlock the page.
269          */
270         if (iop)
271                 atomic_inc(&iop->read_count);
272
273         if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
274                 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
275                 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
276
277                 if (ctx->bio)
278                         submit_bio(ctx->bio);
279
280                 if (ctx->is_readahead) /* same as readahead_gfp_mask */
281                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
282                 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
283                 ctx->bio->bi_opf = REQ_OP_READ;
284                 if (ctx->is_readahead)
285                         ctx->bio->bi_opf |= REQ_RAHEAD;
286                 ctx->bio->bi_iter.bi_sector = sector;
287                 bio_set_dev(ctx->bio, iomap->bdev);
288                 ctx->bio->bi_end_io = iomap_read_end_io;
289         }
290
291         bio_add_page(ctx->bio, page, plen, poff);
292 done:
293         /*
294          * Move the caller beyond our range so that it keeps making progress.
295          * For that we have to include any leading non-uptodate ranges, but
296          * we can skip trailing ones as they will be handled in the next
297          * iteration.
298          */
299         return pos - orig_pos + plen;
300 }
301
302 int
303 iomap_readpage(struct page *page, const struct iomap_ops *ops)
304 {
305         struct iomap_readpage_ctx ctx = { .cur_page = page };
306         struct inode *inode = page->mapping->host;
307         unsigned poff;
308         loff_t ret;
309
310         for (poff = 0; poff < PAGE_SIZE; poff += ret) {
311                 ret = iomap_apply(inode, page_offset(page) + poff,
312                                 PAGE_SIZE - poff, 0, ops, &ctx,
313                                 iomap_readpage_actor);
314                 if (ret <= 0) {
315                         WARN_ON_ONCE(ret == 0);
316                         SetPageError(page);
317                         break;
318                 }
319         }
320
321         if (ctx.bio) {
322                 submit_bio(ctx.bio);
323                 WARN_ON_ONCE(!ctx.cur_page_in_bio);
324         } else {
325                 WARN_ON_ONCE(ctx.cur_page_in_bio);
326                 unlock_page(page);
327         }
328
329         /*
330          * Just like mpage_readpages and block_read_full_page we always
331          * return 0 and just mark the page as PageError on errors.  This
332          * should be cleaned up all through the stack eventually.
333          */
334         return 0;
335 }
336 EXPORT_SYMBOL_GPL(iomap_readpage);
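/*
 * Example (informative only, hypothetical names): a filesystem normally
 * exposes iomap_readpage() through its address_space_operations with a thin
 * wrapper that supplies its own iomap_ops, roughly:
 *
 *	static int example_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_iomap_ops);
 *	}
 */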
337
338 static struct page *
339 iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
340                 loff_t length, loff_t *done)
341 {
342         while (!list_empty(pages)) {
343                 struct page *page = lru_to_page(pages);
344
345                 if (page_offset(page) >= (u64)pos + length)
346                         break;
347
348                 list_del(&page->lru);
349                 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
350                                 GFP_NOFS))
351                         return page;
352
353                 /*
354                  * If we already have a page in the page cache at index we are
355                  * done.  Upper layers don't care if it is uptodate after the
356                  * readpages call itself as every page gets checked again once
357                  * actually needed.
358                  */
359                 *done += PAGE_SIZE;
360                 put_page(page);
361         }
362
363         return NULL;
364 }
365
366 static loff_t
367 iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
368                 void *data, struct iomap *iomap)
369 {
370         struct iomap_readpage_ctx *ctx = data;
371         loff_t done, ret;
372
373         for (done = 0; done < length; done += ret) {
374                 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
375                         if (!ctx->cur_page_in_bio)
376                                 unlock_page(ctx->cur_page);
377                         put_page(ctx->cur_page);
378                         ctx->cur_page = NULL;
379                 }
380                 if (!ctx->cur_page) {
381                         ctx->cur_page = iomap_next_page(inode, ctx->pages,
382                                         pos, length, &done);
383                         if (!ctx->cur_page)
384                                 break;
385                         ctx->cur_page_in_bio = false;
386                 }
387                 ret = iomap_readpage_actor(inode, pos + done, length - done,
388                                 ctx, iomap);
389         }
390
391         return done;
392 }
393
394 int
395 iomap_readpages(struct address_space *mapping, struct list_head *pages,
396                 unsigned nr_pages, const struct iomap_ops *ops)
397 {
398         struct iomap_readpage_ctx ctx = {
399                 .pages          = pages,
400                 .is_readahead   = true,
401         };
402         loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
403         loff_t last = page_offset(list_entry(pages->next, struct page, lru));
404         loff_t length = last - pos + PAGE_SIZE, ret = 0;
405
406         while (length > 0) {
407                 ret = iomap_apply(mapping->host, pos, length, 0, ops,
408                                 &ctx, iomap_readpages_actor);
409                 if (ret <= 0) {
410                         WARN_ON_ONCE(ret == 0);
411                         goto done;
412                 }
413                 pos += ret;
414                 length -= ret;
415         }
416         ret = 0;
417 done:
418         if (ctx.bio)
419                 submit_bio(ctx.bio);
420         if (ctx.cur_page) {
421                 if (!ctx.cur_page_in_bio)
422                         unlock_page(ctx.cur_page);
423                 put_page(ctx.cur_page);
424         }
425
426         /*
427          * Check that we didn't lose a page due to the arcane calling
428          * conventions.

429          */
430         WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
431         return ret;
432 }
433 EXPORT_SYMBOL_GPL(iomap_readpages);
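/*
 * Example (informative only, hypothetical names): as with ->readpage, the
 * readahead side is typically wired up via a small wrapper:
 *
 *	static int example_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&example_iomap_ops);
 *	}
 */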
434
435 /*
436  * iomap_is_partially_uptodate checks whether blocks within a page are
437  * uptodate or not.
438  *
439  * Returns true if all blocks which correspond to a file portion
440  * we want to read within the page are uptodate.
441  */
442 int
443 iomap_is_partially_uptodate(struct page *page, unsigned long from,
444                 unsigned long count)
445 {
446         struct iomap_page *iop = to_iomap_page(page);
447         struct inode *inode = page->mapping->host;
448         unsigned len, first, last;
449         unsigned i;
450
451         /* Limit range to one page */
452         len = min_t(unsigned, PAGE_SIZE - from, count);
453
454         /* First and last blocks in range within page */
455         first = from >> inode->i_blkbits;
456         last = (from + len - 1) >> inode->i_blkbits;
457
458         if (iop) {
459                 for (i = first; i <= last; i++)
460                         if (!test_bit(i, iop->uptodate))
461                                 return 0;
462                 return 1;
463         }
464
465         return 0;
466 }
467 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
468
469 int
470 iomap_releasepage(struct page *page, gfp_t gfp_mask)
471 {
472         /*
473          * mm accommodates an old ext3 case where clean pages might not have had
474          * the dirty bit cleared. Thus, it can send actual dirty pages to
475          * ->releasepage() via shrink_active_list(), skip those here.
476          */
477         if (PageDirty(page) || PageWriteback(page))
478                 return 0;
479         iomap_page_release(page);
480         return 1;
481 }
482 EXPORT_SYMBOL_GPL(iomap_releasepage);
483
484 void
485 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
486 {
487         /*
488          * If we are invalidating the entire page, clear the dirty state from it
489          * and release it to avoid unnecessary buildup of the LRU.
490          */
491         if (offset == 0 && len == PAGE_SIZE) {
492                 WARN_ON_ONCE(PageWriteback(page));
493                 cancel_dirty_page(page);
494                 iomap_page_release(page);
495         }
496 }
497 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
498
499 #ifdef CONFIG_MIGRATION
500 int
501 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
502                 struct page *page, enum migrate_mode mode)
503 {
504         int ret;
505
506         ret = migrate_page_move_mapping(mapping, newpage, page, 0);
507         if (ret != MIGRATEPAGE_SUCCESS)
508                 return ret;
509
510         if (page_has_private(page)) {
511                 ClearPagePrivate(page);
512                 get_page(newpage);
513                 set_page_private(newpage, page_private(page));
514                 set_page_private(page, 0);
515                 put_page(page);
516                 SetPagePrivate(newpage);
517         }
518
519         if (mode != MIGRATE_SYNC_NO_COPY)
520                 migrate_page_copy(newpage, page);
521         else
522                 migrate_page_states(newpage, page);
523         return MIGRATEPAGE_SUCCESS;
524 }
525 EXPORT_SYMBOL_GPL(iomap_migrate_page);
526 #endif /* CONFIG_MIGRATION */
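/*
 * Example (informative only, hypothetical names): the helpers above can be
 * plugged straight into a filesystem's address_space_operations; a minimal
 * sketch, assuming the example_readpage()/example_readpages() wrappers shown
 * earlier, might look like:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage		= example_readpage,
 *		.readpages		= example_readpages,
 *		.set_page_dirty		= iomap_set_page_dirty,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.migratepage		= iomap_migrate_page,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	};
 *
 * (iomap_set_page_dirty is defined further down in this file; ->writepage
 * and ->writepages still need a filesystem-specific implementation in this
 * kernel version.)
 */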
527
528 static void
529 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
530 {
531         loff_t i_size = i_size_read(inode);
532
533         /*
534          * Only truncate newly allocated pages beyond EOF, even if the
535          * write started inside the existing inode size.
536          */
537         if (pos + len > i_size)
538                 truncate_pagecache_range(inode, max(pos, i_size),
539                                          pos + len - 1);
540 }
541
542 static int
543 iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
544                 unsigned poff, unsigned plen, unsigned from, unsigned to,
545                 struct iomap *iomap)
546 {
547         struct bio_vec bvec;
548         struct bio bio;
549
550         if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
551                 zero_user_segments(page, poff, from, to, poff + plen);
552                 iomap_set_range_uptodate(page, poff, plen);
553                 return 0;
554         }
555
556         bio_init(&bio, &bvec, 1);
557         bio.bi_opf = REQ_OP_READ;
558         bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
559         bio_set_dev(&bio, iomap->bdev);
560         __bio_add_page(&bio, page, plen, poff);
561         return submit_bio_wait(&bio);
562 }
563
564 static int
565 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
566                 struct page *page, struct iomap *iomap)
567 {
568         struct iomap_page *iop = iomap_page_create(inode, page);
569         loff_t block_size = i_blocksize(inode);
570         loff_t block_start = pos & ~(block_size - 1);
571         loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
572         unsigned from = offset_in_page(pos), to = from + len, poff, plen;
573         int status = 0;
574
575         if (PageUptodate(page))
576                 return 0;
577         ClearPageError(page);
578
579         do {
580                 iomap_adjust_read_range(inode, iop, &block_start,
581                                 block_end - block_start, &poff, &plen);
582                 if (plen == 0)
583                         break;
584
585                 if ((from > poff && from < poff + plen) ||
586                     (to > poff && to < poff + plen)) {
587                         status = iomap_read_page_sync(inode, block_start, page,
588                                         poff, plen, from, to, iomap);
589                         if (status)
590                                 break;
591                 }
592
593         } while ((block_start += plen) < block_end);
594
595         return status;
596 }
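/*
 * Worked example (informative, not from the original source): with 1k
 * blocks, a 200-byte write at offset 100 into an otherwise clean page only
 * touches block 0.  Since the write neither starts nor ends on a block
 * boundary, that block is read in synchronously first (or simply zeroed if
 * it is a hole, unwritten, or entirely beyond i_size) so the copy cannot
 * expose stale data around the written range.
 */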
597
598 static int
599 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
600                 struct page **pagep, struct iomap *iomap)
601 {
602         const struct iomap_page_ops *page_ops = iomap->page_ops;
603         pgoff_t index = pos >> PAGE_SHIFT;
604         struct page *page;
605         int status = 0;
606
607         BUG_ON(pos + len > iomap->offset + iomap->length);
608
609         if (fatal_signal_pending(current))
610                 return -EINTR;
611
612         if (page_ops && page_ops->page_prepare) {
613                 status = page_ops->page_prepare(inode, pos, len, iomap);
614                 if (status)
615                         return status;
616         }
617
618         page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
619         if (!page) {
620                 status = -ENOMEM;
621                 goto out_no_page;
622         }
623
624         if (iomap->type == IOMAP_INLINE)
625                 iomap_read_inline_data(inode, page, iomap);
626         else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
627                 status = __block_write_begin_int(page, pos, len, NULL, iomap);
628         else
629                 status = __iomap_write_begin(inode, pos, len, page, iomap);
630
631         if (unlikely(status))
632                 goto out_unlock;
633
634         *pagep = page;
635         return 0;
636
637 out_unlock:
638         unlock_page(page);
639         put_page(page);
640         iomap_write_failed(inode, pos, len);
641
642 out_no_page:
643         if (page_ops && page_ops->page_done)
644                 page_ops->page_done(inode, pos, 0, NULL, iomap);
645         return status;
646 }
647
648 int
649 iomap_set_page_dirty(struct page *page)
650 {
651         struct address_space *mapping = page_mapping(page);
652         int newly_dirty;
653
654         if (unlikely(!mapping))
655                 return !TestSetPageDirty(page);
656
657         /*
658          * Lock out page->mem_cgroup migration to keep PageDirty
659          * synchronized with per-memcg dirty page counters.
660          */
661         lock_page_memcg(page);
662         newly_dirty = !TestSetPageDirty(page);
663         if (newly_dirty)
664                 __set_page_dirty(page, mapping, 0);
665         unlock_page_memcg(page);
666
667         if (newly_dirty)
668                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
669         return newly_dirty;
670 }
671 EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
672
673 static int
674 __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
675                 unsigned copied, struct page *page, struct iomap *iomap)
676 {
677         flush_dcache_page(page);
678
679         /*
680          * The blocks that were entirely written will now be uptodate, so we
681          * don't have to worry about a readpage reading them and overwriting a
682          * partial write.  However if we have encountered a short write and only
683          * partially written into a block, it will not be marked uptodate, so a
684          * readpage might come in and destroy our partial write.
685          *
686          * Do the simplest thing, and just treat any short write to a non
687          * uptodate page as a zero-length write, and force the caller to redo
688          * the whole thing.
689          */
690         if (unlikely(copied < len && !PageUptodate(page)))
691                 return 0;
692         iomap_set_range_uptodate(page, offset_in_page(pos), len);
693         iomap_set_page_dirty(page);
694         return copied;
695 }
696
697 static int
698 iomap_write_end_inline(struct inode *inode, struct page *page,
699                 struct iomap *iomap, loff_t pos, unsigned copied)
700 {
701         void *addr;
702
703         WARN_ON_ONCE(!PageUptodate(page));
704         BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
705
706         addr = kmap_atomic(page);
707         memcpy(iomap->inline_data + pos, addr + pos, copied);
708         kunmap_atomic(addr);
709
710         mark_inode_dirty(inode);
711         return copied;
712 }
713
714 static int
715 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
716                 unsigned copied, struct page *page, struct iomap *iomap)
717 {
718         const struct iomap_page_ops *page_ops = iomap->page_ops;
719         loff_t old_size = inode->i_size;
720         int ret;
721
722         if (iomap->type == IOMAP_INLINE) {
723                 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
724         } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
725                 ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
726                                 page, NULL);
727         } else {
728                 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
729         }
730
731         /*
732          * Update the in-memory inode size after copying the data into the page
733          * cache.  It's up to the file system to write the updated size to disk,
734          * preferably after I/O completion so that no stale data is exposed.
735          */
736         if (pos + ret > old_size) {
737                 i_size_write(inode, pos + ret);
738                 iomap->flags |= IOMAP_F_SIZE_CHANGED;
739         }
740         unlock_page(page);
741
742         if (old_size < pos)
743                 pagecache_isize_extended(inode, old_size, pos);
744         if (page_ops && page_ops->page_done)
745                 page_ops->page_done(inode, pos, ret, page, iomap);
746         put_page(page);
747
748         if (ret < len)
749                 iomap_write_failed(inode, pos, len);
750         return ret;
751 }
752
753 static loff_t
754 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
755                 struct iomap *iomap)
756 {
757         struct iov_iter *i = data;
758         long status = 0;
759         ssize_t written = 0;
760         unsigned int flags = AOP_FLAG_NOFS;
761
762         do {
763                 struct page *page;
764                 unsigned long offset;   /* Offset into pagecache page */
765                 unsigned long bytes;    /* Bytes to write to page */
766                 size_t copied;          /* Bytes copied from user */
767
768                 offset = offset_in_page(pos);
769                 bytes = min_t(unsigned long, PAGE_SIZE - offset,
770                                                 iov_iter_count(i));
771 again:
772                 if (bytes > length)
773                         bytes = length;
774
775                 /*
776                  * Bring in the user page that we will copy from _first_.
777                  * Otherwise there's a nasty deadlock on copying from the
778                  * same page as we're writing to, without it being marked
779                  * up-to-date.
780                  *
781                  * Not only is this an optimisation, but it is also required
782                  * to check that the address is actually valid, when atomic
783                  * usercopies are used, below.
784                  */
785                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
786                         status = -EFAULT;
787                         break;
788                 }
789
790                 status = iomap_write_begin(inode, pos, bytes, flags, &page,
791                                 iomap);
792                 if (unlikely(status))
793                         break;
794
795                 if (mapping_writably_mapped(inode->i_mapping))
796                         flush_dcache_page(page);
797
798                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
799
800                 flush_dcache_page(page);
801
802                 status = iomap_write_end(inode, pos, bytes, copied, page,
803                                 iomap);
804                 if (unlikely(status < 0))
805                         break;
806                 copied = status;
807
808                 cond_resched();
809
810                 iov_iter_advance(i, copied);
811                 if (unlikely(copied == 0)) {
812                         /*
813                          * If we were unable to copy any data at all, we must
814                          * fall back to a single segment length write.
815                          *
816                          * If we didn't fall back here, we could livelock
817                          * because not all segments in the iov can be copied at
818                          * once without a pagefault.
819                          */
820                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
821                                                 iov_iter_single_seg_count(i));
822                         goto again;
823                 }
824                 pos += copied;
825                 written += copied;
826                 length -= copied;
827
828                 balance_dirty_pages_ratelimited(inode->i_mapping);
829         } while (iov_iter_count(i) && length);
830
831         return written ? written : status;
832 }
833
834 ssize_t
835 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
836                 const struct iomap_ops *ops)
837 {
838         struct inode *inode = iocb->ki_filp->f_mapping->host;
839         loff_t pos = iocb->ki_pos, ret = 0, written = 0;
840
841         while (iov_iter_count(iter)) {
842                 ret = iomap_apply(inode, pos, iov_iter_count(iter),
843                                 IOMAP_WRITE, ops, iter, iomap_write_actor);
844                 if (ret <= 0)
845                         break;
846                 pos += ret;
847                 written += ret;
848         }
849
850         return written ? written : ret;
851 }
852 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
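/*
 * Example (informative only, hypothetical sketch): a filesystem's
 * ->write_iter path typically holds the inode lock across the copy and
 * advances the file position itself, since this helper only returns the
 * number of bytes written:
 *
 *	inode_lock(inode);
 *	ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0)
 *		iocb->ki_pos += ret;
 */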
853
854 static struct page *
855 __iomap_read_page(struct inode *inode, loff_t offset)
856 {
857         struct address_space *mapping = inode->i_mapping;
858         struct page *page;
859
860         page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
861         if (IS_ERR(page))
862                 return page;
863         if (!PageUptodate(page)) {
864                 put_page(page);
865                 return ERR_PTR(-EIO);
866         }
867         return page;
868 }
869
870 static loff_t
871 iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
872                 struct iomap *iomap)
873 {
874         long status = 0;
875         ssize_t written = 0;
876
877         do {
878                 struct page *page, *rpage;
879                 unsigned long offset;   /* Offset into pagecache page */
880                 unsigned long bytes;    /* Bytes to write to page */
881
882                 offset = offset_in_page(pos);
883                 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
884
885                 rpage = __iomap_read_page(inode, pos);
886                 if (IS_ERR(rpage))
887                         return PTR_ERR(rpage);
888
889                 status = iomap_write_begin(inode, pos, bytes,
890                                            AOP_FLAG_NOFS, &page, iomap);
891                 put_page(rpage);
892                 if (unlikely(status))
893                         return status;
894
895                 WARN_ON_ONCE(!PageUptodate(page));
896
897                 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
898                 if (unlikely(status <= 0)) {
899                         if (WARN_ON_ONCE(status == 0))
900                                 return -EIO;
901                         return status;
902                 }
903
904                 cond_resched();
905
906                 pos += status;
907                 written += status;
908                 length -= status;
909
910                 balance_dirty_pages_ratelimited(inode->i_mapping);
911         } while (length);
912
913         return written;
914 }
915
916 int
917 iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
918                 const struct iomap_ops *ops)
919 {
920         loff_t ret;
921
922         while (len) {
923                 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
924                                 iomap_dirty_actor);
925                 if (ret <= 0)
926                         return ret;
927                 pos += ret;
928                 len -= ret;
929         }
930
931         return 0;
932 }
933 EXPORT_SYMBOL_GPL(iomap_file_dirty);
934
935 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
936                 unsigned bytes, struct iomap *iomap)
937 {
938         struct page *page;
939         int status;
940
941         status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
942                                    iomap);
943         if (status)
944                 return status;
945
946         zero_user(page, offset, bytes);
947         mark_page_accessed(page);
948
949         return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
950 }
951
952 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
953                 struct iomap *iomap)
954 {
955         return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
956                         iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
957 }
958
959 static loff_t
960 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
961                 void *data, struct iomap *iomap)
962 {
963         bool *did_zero = data;
964         loff_t written = 0;
965         int status;
966
967         /* already zeroed?  we're done. */
968         if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
969                 return count;
970
971         do {
972                 unsigned offset, bytes;
973
974                 offset = offset_in_page(pos);
975                 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
976
977                 if (IS_DAX(inode))
978                         status = iomap_dax_zero(pos, offset, bytes, iomap);
979                 else
980                         status = iomap_zero(inode, pos, offset, bytes, iomap);
981                 if (status < 0)
982                         return status;
983
984                 pos += bytes;
985                 count -= bytes;
986                 written += bytes;
987                 if (did_zero)
988                         *did_zero = true;
989         } while (count > 0);
990
991         return written;
992 }
993
994 int
995 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
996                 const struct iomap_ops *ops)
997 {
998         loff_t ret;
999
1000         while (len > 0) {
1001                 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1002                                 ops, did_zero, iomap_zero_range_actor);
1003                 if (ret <= 0)
1004                         return ret;
1005
1006                 pos += ret;
1007                 len -= ret;
1008         }
1009
1010         return 0;
1011 }
1012 EXPORT_SYMBOL_GPL(iomap_zero_range);
1013
1014 int
1015 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1016                 const struct iomap_ops *ops)
1017 {
1018         unsigned int blocksize = i_blocksize(inode);
1019         unsigned int off = pos & (blocksize - 1);
1020
1021         /* Block boundary? Nothing to do */
1022         if (!off)
1023                 return 0;
1024         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1025 }
1026 EXPORT_SYMBOL_GPL(iomap_truncate_page);
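/*
 * Example (informative only, hypothetical names): when shrinking a file, a
 * filesystem typically zeroes the tail of the new last block before it
 * updates i_size, e.g.:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&example_iomap_ops);
 */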
1027
1028 static loff_t
1029 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1030                 void *data, struct iomap *iomap)
1031 {
1032         struct page *page = data;
1033         int ret;
1034
1035         if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1036                 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1037                 if (ret)
1038                         return ret;
1039                 block_commit_write(page, 0, length);
1040         } else {
1041                 WARN_ON_ONCE(!PageUptodate(page));
1042                 iomap_page_create(inode, page);
1043                 set_page_dirty(page);
1044         }
1045
1046         return length;
1047 }
1048
1049 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1050 {
1051         struct page *page = vmf->page;
1052         struct inode *inode = file_inode(vmf->vma->vm_file);
1053         unsigned long length;
1054         loff_t offset, size;
1055         ssize_t ret;
1056
1057         lock_page(page);
1058         size = i_size_read(inode);
1059         offset = page_offset(page);
1060         if (page->mapping != inode->i_mapping || offset > size) {
1061                 /* We overload EFAULT to mean page got truncated */
1062                 ret = -EFAULT;
1063                 goto out_unlock;
1064         }
1065
1066         /* page is wholly or partially inside EOF */
1067         if (offset > size - PAGE_SIZE)
1068                 length = offset_in_page(size);
1069         else
1070                 length = PAGE_SIZE;
1071
1072         while (length > 0) {
1073                 ret = iomap_apply(inode, offset, length,
1074                                 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1075                                 iomap_page_mkwrite_actor);
1076                 if (unlikely(ret <= 0))
1077                         goto out_unlock;
1078                 offset += ret;
1079                 length -= ret;
1080         }
1081
1082         wait_for_stable_page(page);
1083         return VM_FAULT_LOCKED;
1084 out_unlock:
1085         unlock_page(page);
1086         return block_page_mkwrite_return(ret);
1087 }
1088 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
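/*
 * Example (informative only, hypothetical names): a ->page_mkwrite handler
 * built on this helper typically takes care of freeze protection and the
 * file timestamps itself, roughly:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */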