GNU Linux-libre 4.19.314-gnu1
fs/iomap.c
1 /*
2  * Copyright (C) 2010 Red Hat, Inc.
3  * Copyright (c) 2016-2018 Christoph Hellwig.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #include <linux/module.h>
15 #include <linux/compiler.h>
16 #include <linux/fs.h>
17 #include <linux/iomap.h>
18 #include <linux/uaccess.h>
19 #include <linux/gfp.h>
20 #include <linux/migrate.h>
21 #include <linux/mm.h>
22 #include <linux/mm_inline.h>
23 #include <linux/swap.h>
24 #include <linux/pagemap.h>
25 #include <linux/pagevec.h>
26 #include <linux/file.h>
27 #include <linux/uio.h>
28 #include <linux/backing-dev.h>
29 #include <linux/buffer_head.h>
30 #include <linux/task_io_accounting_ops.h>
31 #include <linux/dax.h>
32 #include <linux/sched/signal.h>
33 #include <linux/swap.h>
34
35 #include "internal.h"
36
37 /*
38  * Execute an iomap write on a segment of the mapping that spans a
39  * contiguous range of pages that have identical block mapping state.
40  *
41  * This avoids the need to map pages individually, do individual allocations
42  * for each page, and most importantly avoids the need for filesystem-specific
43  * locking per page. Instead, all the operations are amortised over the entire
44  * range of pages. It is assumed that the filesystems will lock whatever
45  * resources they require in the iomap_begin call, and release them in the
46  * iomap_end call.
47  */
48 loff_t
49 iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
50                 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
51 {
52         struct iomap iomap = { 0 };
53         loff_t written = 0, ret;
54
55         /*
56          * Need to map a range from start position for length bytes. This can
57          * span multiple pages - it is only guaranteed to return a range of a
58          * single type of pages (e.g. all into a hole, all mapped or all
59          * unwritten). Failure at this point has nothing to undo.
60          *
61          * If allocation is required for this range, reserve the space now so
62          * that the allocation is guaranteed to succeed later on. Once we copy
63          * the data into the page cache pages, then we cannot fail otherwise we
64          * expose transient stale data. If the reserve fails, we can safely
65          * back out at this point as there is nothing to undo.
66          */
67         ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
68         if (ret)
69                 return ret;
70         if (WARN_ON(iomap.offset > pos))
71                 return -EIO;
72         if (WARN_ON(iomap.length == 0))
73                 return -EIO;
74
75         /*
76          * Cut down the length to the one actually provided by the filesystem,
77          * as it might not be able to give us the whole size that we requested.
78          */
79         if (iomap.offset + iomap.length < pos + length)
80                 length = iomap.offset + iomap.length - pos;
81
82         /*
83          * Now that we have guaranteed that the space allocation will succeed,
84          * we can do the copy-in page by page without having to worry about
85          * failures exposing transient data.
86          */
87         written = actor(inode, pos, length, data, &iomap);
88
89         /*
90          * Now the data has been copied, commit the range we've copied.  This
91          * should not fail unless the filesystem has had a fatal error.
92          */
93         if (ops->iomap_end) {
94                 ret = ops->iomap_end(inode, pos, length,
95                                      written > 0 ? written : 0,
96                                      flags, &iomap);
97         }
98
99         return written ? written : ret;
100 }
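/*
 * Minimal sketch of how iomap_apply() is driven: the caller supplies an
 * iomap_ops instance and an actor that consumes each mapped extent.  The
 * actor below merely counts the bytes it is offered; the myfs_* names are
 * purely illustrative, and real callers pass actors such as
 * iomap_readpage_actor or iomap_write_actor defined later in this file.
 */
static loff_t
myfs_count_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        loff_t *total = data;

        /* Claim the whole mapped range without doing any I/O. */
        *total += length;
        return length;
}

static loff_t
myfs_count_extent_bytes(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        loff_t total = 0, ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_REPORT, ops,
                                &total, myfs_count_actor);
                if (ret <= 0)
                        return ret;
                pos += ret;
                len -= ret;
        }
        return total;
}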
101
102 static sector_t
103 iomap_sector(struct iomap *iomap, loff_t pos)
104 {
105         return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
106 }
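/*
 * For example, if iomap->offset is 8192 and iomap->addr is 1048576 (the
 * disk byte address backing that offset), then pos == 12288 maps to disk
 * byte 1048576 + 4096 == 1052672, i.e. 512-byte sector 2056.
 */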
107
108 static struct iomap_page *
109 iomap_page_create(struct inode *inode, struct page *page)
110 {
111         struct iomap_page *iop = to_iomap_page(page);
112         unsigned int nr_blocks = PAGE_SIZE / i_blocksize(inode);
113
114         if (iop || i_blocksize(inode) == PAGE_SIZE)
115                 return iop;
116
117         iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
118         atomic_set(&iop->read_count, 0);
119         atomic_set(&iop->write_count, 0);
120         spin_lock_init(&iop->uptodate_lock);
121         bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
122         if (PageUptodate(page))
123                 bitmap_fill(iop->uptodate, nr_blocks);
124
125         /*
126          * migrate_page_move_mapping() assumes that pages with private data have
127          * their count elevated by 1.
128          */
129         get_page(page);
130         set_page_private(page, (unsigned long)iop);
131         SetPagePrivate(page);
132         return iop;
133 }
134
135 static void
136 iomap_page_release(struct page *page)
137 {
138         struct iomap_page *iop = to_iomap_page(page);
139
140         if (!iop)
141                 return;
142         WARN_ON_ONCE(atomic_read(&iop->read_count));
143         WARN_ON_ONCE(atomic_read(&iop->write_count));
144         ClearPagePrivate(page);
145         set_page_private(page, 0);
146         put_page(page);
147         kfree(iop);
148 }
149
150 /*
151  * Calculate the range inside the page that we actually need to read.
152  */
153 static void
154 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
155                 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
156 {
157         loff_t orig_pos = *pos;
158         loff_t isize = i_size_read(inode);
159         unsigned block_bits = inode->i_blkbits;
160         unsigned block_size = (1 << block_bits);
161         unsigned poff = offset_in_page(*pos);
162         unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
163         unsigned first = poff >> block_bits;
164         unsigned last = (poff + plen - 1) >> block_bits;
165
166         /*
167          * If the block size is smaller than the page size we need to check the
168          * per-block uptodate status and adjust the offset and length if needed
169          * to avoid reading in already uptodate ranges.
170          */
171         if (iop) {
172                 unsigned int i;
173
174                 /* move forward for each leading block marked uptodate */
175                 for (i = first; i <= last; i++) {
176                         if (!test_bit(i, iop->uptodate))
177                                 break;
178                         *pos += block_size;
179                         poff += block_size;
180                         plen -= block_size;
181                         first++;
182                 }
183
184                 /* truncate len if we find any trailing uptodate block(s) */
185                 for ( ; i <= last; i++) {
186                         if (test_bit(i, iop->uptodate)) {
187                                 plen -= (last - i + 1) * block_size;
188                                 last = i - 1;
189                                 break;
190                         }
191                 }
192         }
193
194         /*
195          * If the extent spans the block that contains the i_size we need to
196          * handle both halves separately so that we properly zero data in the
197          * page cache for blocks that are entirely outside of i_size.
198          */
199         if (orig_pos <= isize && orig_pos + length > isize) {
200                 unsigned end = offset_in_page(isize - 1) >> block_bits;
201
202                 if (first <= end && last > end)
203                         plen -= (last - end) * block_size;
204         }
205
206         *offp = poff;
207         *lenp = plen;
208 }
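/*
 * Worked example, assuming 1k blocks on a 4k page and a read of the whole
 * page (first == 0, last == 3): if block 0 is already uptodate, the first
 * loop advances *pos and poff by 1024 and shrinks plen to 3072; if block 3
 * is also uptodate, the second loop trims plen to 2048.  Only blocks 1 and
 * 2 are then actually read.
 */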
209
210 static void
211 iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
212 {
213         struct iomap_page *iop = to_iomap_page(page);
214         struct inode *inode = page->mapping->host;
215         unsigned first = off >> inode->i_blkbits;
216         unsigned last = (off + len - 1) >> inode->i_blkbits;
217         bool uptodate = true;
218         unsigned long flags;
219         unsigned int i;
220
221         spin_lock_irqsave(&iop->uptodate_lock, flags);
222         for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
223                 if (i >= first && i <= last)
224                         set_bit(i, iop->uptodate);
225                 else if (!test_bit(i, iop->uptodate))
226                         uptodate = false;
227         }
228
229         if (uptodate)
230                 SetPageUptodate(page);
231         spin_unlock_irqrestore(&iop->uptodate_lock, flags);
232 }
233
234 static void
235 iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
236 {
237         if (PageError(page))
238                 return;
239
240         if (page_has_private(page))
241                 iomap_iop_set_range_uptodate(page, off, len);
242         else
243                 SetPageUptodate(page);
244 }
245
246 static void
247 iomap_read_finish(struct iomap_page *iop, struct page *page)
248 {
249         if (!iop || atomic_dec_and_test(&iop->read_count))
250                 unlock_page(page);
251 }
252
253 static void
254 iomap_read_page_end_io(struct bio_vec *bvec, int error)
255 {
256         struct page *page = bvec->bv_page;
257         struct iomap_page *iop = to_iomap_page(page);
258
259         if (unlikely(error)) {
260                 ClearPageUptodate(page);
261                 SetPageError(page);
262         } else {
263                 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
264         }
265
266         iomap_read_finish(iop, page);
267 }
268
269 static void
270 iomap_read_inline_data(struct inode *inode, struct page *page,
271                 struct iomap *iomap)
272 {
273         size_t size = i_size_read(inode);
274         void *addr;
275
276         if (PageUptodate(page))
277                 return;
278
279         BUG_ON(page->index);
280         BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
281
282         addr = kmap_atomic(page);
283         memcpy(addr, iomap->inline_data, size);
284         memset(addr + size, 0, PAGE_SIZE - size);
285         kunmap_atomic(addr);
286         SetPageUptodate(page);
287 }
288
289 static void
290 iomap_read_end_io(struct bio *bio)
291 {
292         int error = blk_status_to_errno(bio->bi_status);
293         struct bio_vec *bvec;
294         int i;
295
296         bio_for_each_segment_all(bvec, bio, i)
297                 iomap_read_page_end_io(bvec, error);
298         bio_put(bio);
299 }
300
301 struct iomap_readpage_ctx {
302         struct page             *cur_page;
303         bool                    cur_page_in_bio;
304         bool                    is_readahead;
305         struct bio              *bio;
306         struct list_head        *pages;
307 };
308
309 static loff_t
310 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
311                 struct iomap *iomap)
312 {
313         struct iomap_readpage_ctx *ctx = data;
314         struct page *page = ctx->cur_page;
315         struct iomap_page *iop = iomap_page_create(inode, page);
316         bool is_contig = false;
317         loff_t orig_pos = pos;
318         unsigned poff, plen;
319         sector_t sector;
320
321         if (iomap->type == IOMAP_INLINE) {
322                 WARN_ON_ONCE(pos);
323                 iomap_read_inline_data(inode, page, iomap);
324                 return PAGE_SIZE;
325         }
326
327         /* zero post-eof blocks as the page may be mapped */
328         iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
329         if (plen == 0)
330                 goto done;
331
332         if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
333                 zero_user(page, poff, plen);
334                 iomap_set_range_uptodate(page, poff, plen);
335                 goto done;
336         }
337
338         ctx->cur_page_in_bio = true;
339
340         /*
341          * Try to merge into a previous segment if we can.
342          */
343         sector = iomap_sector(iomap, pos);
344         if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
345                 if (__bio_try_merge_page(ctx->bio, page, plen, poff))
346                         goto done;
347                 is_contig = true;
348         }
349
350         /*
351          * If we start a new segment we need to increase the read count, and we
352          * need to do so before submitting any previous full bio to make sure
353          * that we don't prematurely unlock the page.
354          */
355         if (iop)
356                 atomic_inc(&iop->read_count);
357
358         if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
359                 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
360                 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
361
362                 if (ctx->bio)
363                         submit_bio(ctx->bio);
364
365                 if (ctx->is_readahead) /* same as readahead_gfp_mask */
366                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
367                 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
368                 ctx->bio->bi_opf = REQ_OP_READ;
369                 if (ctx->is_readahead)
370                         ctx->bio->bi_opf |= REQ_RAHEAD;
371                 ctx->bio->bi_iter.bi_sector = sector;
372                 bio_set_dev(ctx->bio, iomap->bdev);
373                 ctx->bio->bi_end_io = iomap_read_end_io;
374         }
375
376         __bio_add_page(ctx->bio, page, plen, poff);
377 done:
378         /*
379          * Move the caller beyond our range so that it keeps making progress.
380          * For that we have to include any leading non-uptodate ranges, but
381          * we can skip trailing ones as they will be handled in the next
382          * iteration.
383          */
384         return pos - orig_pos + plen;
385 }
386
387 int
388 iomap_readpage(struct page *page, const struct iomap_ops *ops)
389 {
390         struct iomap_readpage_ctx ctx = { .cur_page = page };
391         struct inode *inode = page->mapping->host;
392         unsigned poff;
393         loff_t ret;
394
395         for (poff = 0; poff < PAGE_SIZE; poff += ret) {
396                 ret = iomap_apply(inode, page_offset(page) + poff,
397                                 PAGE_SIZE - poff, 0, ops, &ctx,
398                                 iomap_readpage_actor);
399                 if (ret <= 0) {
400                         WARN_ON_ONCE(ret == 0);
401                         SetPageError(page);
402                         break;
403                 }
404         }
405
406         if (ctx.bio) {
407                 submit_bio(ctx.bio);
408                 WARN_ON_ONCE(!ctx.cur_page_in_bio);
409         } else {
410                 WARN_ON_ONCE(ctx.cur_page_in_bio);
411                 unlock_page(page);
412         }
413
414         /*
415          * Just like mpage_readpages and block_read_full_page we always
416          * return 0 and just mark the page as PageError on errors.  This
417          * should be cleaned up all through the stack eventually.
418          */
419         return 0;
420 }
421 EXPORT_SYMBOL_GPL(iomap_readpage);
422
423 static struct page *
424 iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
425                 loff_t length, loff_t *done)
426 {
427         while (!list_empty(pages)) {
428                 struct page *page = lru_to_page(pages);
429
430                 if (page_offset(page) >= (u64)pos + length)
431                         break;
432
433                 list_del(&page->lru);
434                 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
435                                 GFP_NOFS))
436                         return page;
437
438                 /*
439                  * If we already have a page in the page cache at index we are
440                  * done.  Upper layers don't care if it is uptodate after the
441                  * readpages call itself as every page gets checked again once
442                  * actually needed.
443                  */
444                 *done += PAGE_SIZE;
445                 put_page(page);
446         }
447
448         return NULL;
449 }
450
451 static loff_t
452 iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
453                 void *data, struct iomap *iomap)
454 {
455         struct iomap_readpage_ctx *ctx = data;
456         loff_t done, ret;
457
458         for (done = 0; done < length; done += ret) {
459                 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
460                         if (!ctx->cur_page_in_bio)
461                                 unlock_page(ctx->cur_page);
462                         put_page(ctx->cur_page);
463                         ctx->cur_page = NULL;
464                 }
465                 if (!ctx->cur_page) {
466                         ctx->cur_page = iomap_next_page(inode, ctx->pages,
467                                         pos, length, &done);
468                         if (!ctx->cur_page)
469                                 break;
470                         ctx->cur_page_in_bio = false;
471                 }
472                 ret = iomap_readpage_actor(inode, pos + done, length - done,
473                                 ctx, iomap);
474         }
475
476         return done;
477 }
478
479 int
480 iomap_readpages(struct address_space *mapping, struct list_head *pages,
481                 unsigned nr_pages, const struct iomap_ops *ops)
482 {
483         struct iomap_readpage_ctx ctx = {
484                 .pages          = pages,
485                 .is_readahead   = true,
486         };
487         loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
488         loff_t last = page_offset(list_entry(pages->next, struct page, lru));
489         loff_t length = last - pos + PAGE_SIZE, ret = 0;
490
491         while (length > 0) {
492                 ret = iomap_apply(mapping->host, pos, length, 0, ops,
493                                 &ctx, iomap_readpages_actor);
494                 if (ret <= 0) {
495                         WARN_ON_ONCE(ret == 0);
496                         goto done;
497                 }
498                 pos += ret;
499                 length -= ret;
500         }
501         ret = 0;
502 done:
503         if (ctx.bio)
504                 submit_bio(ctx.bio);
505         if (ctx.cur_page) {
506                 if (!ctx.cur_page_in_bio)
507                         unlock_page(ctx.cur_page);
508                 put_page(ctx.cur_page);
509         }
510
511         /*
512          * Check that we didn't lose a page due to the arcance calling
513          * conventions..
514          */
515         WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
516         return ret;
517 }
518 EXPORT_SYMBOL_GPL(iomap_readpages);
519
520 /*
521  * iomap_is_partially_uptodate checks whether blocks within a page are
522  * uptodate or not.
523  *
524  * Returns true if all blocks which correspond to a file portion
525  * we want to read within the page are uptodate.
526  */
527 int
528 iomap_is_partially_uptodate(struct page *page, unsigned long from,
529                 unsigned long count)
530 {
531         struct iomap_page *iop = to_iomap_page(page);
532         struct inode *inode = page->mapping->host;
533         unsigned len, first, last;
534         unsigned i;
535
536         /* Limit range to one page */
537         len = min_t(unsigned, PAGE_SIZE - from, count);
538
539         /* First and last blocks in range within page */
540         first = from >> inode->i_blkbits;
541         last = (from + len - 1) >> inode->i_blkbits;
542
543         if (iop) {
544                 for (i = first; i <= last; i++)
545                         if (!test_bit(i, iop->uptodate))
546                                 return 0;
547                 return 1;
548         }
549
550         return 0;
551 }
552 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
553
554 int
555 iomap_releasepage(struct page *page, gfp_t gfp_mask)
556 {
557         /*
558          * mm accommodates an old ext3 case where clean pages might not have had
559          * the dirty bit cleared. Thus, it can send actual dirty pages to
560          * ->releasepage() via shrink_active_list(); skip those here.
561          */
562         if (PageDirty(page) || PageWriteback(page))
563                 return 0;
564         iomap_page_release(page);
565         return 1;
566 }
567 EXPORT_SYMBOL_GPL(iomap_releasepage);
568
569 void
570 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
571 {
572         /*
573          * If we are invalidating the entire page, clear the dirty state from it
574          * and release it to avoid unnecessary buildup of the LRU.
575          */
576         if (offset == 0 && len == PAGE_SIZE) {
577                 WARN_ON_ONCE(PageWriteback(page));
578                 cancel_dirty_page(page);
579                 iomap_page_release(page);
580         }
581 }
582 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
583
584 #ifdef CONFIG_MIGRATION
585 int
586 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
587                 struct page *page, enum migrate_mode mode)
588 {
589         int ret;
590
591         ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
592         if (ret != MIGRATEPAGE_SUCCESS)
593                 return ret;
594
595         if (page_has_private(page)) {
596                 ClearPagePrivate(page);
597                 get_page(newpage);
598                 set_page_private(newpage, page_private(page));
599                 set_page_private(page, 0);
600                 put_page(page);
601                 SetPagePrivate(newpage);
602         }
603
604         if (mode != MIGRATE_SYNC_NO_COPY)
605                 migrate_page_copy(newpage, page);
606         else
607                 migrate_page_states(newpage, page);
608         return MIGRATEPAGE_SUCCESS;
609 }
610 EXPORT_SYMBOL_GPL(iomap_migrate_page);
611 #endif /* CONFIG_MIGRATION */
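/*
 * The buffered read helpers above are normally wired up through a
 * filesystem's address_space_operations.  A sketch of such a user follows;
 * the myfs_* names and myfs_iomap_ops are assumed, filesystem-provided
 * symbols, not something defined in this file:
 */
static int
myfs_readpage(struct file *unused, struct page *page)
{
        return iomap_readpage(page, &myfs_iomap_ops);
}

static int
myfs_readpages(struct file *unused, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        return iomap_readpages(mapping, pages, nr_pages, &myfs_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
        .readpage               = myfs_readpage,
        .readpages              = myfs_readpages,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .releasepage            = iomap_releasepage,
        .invalidatepage         = iomap_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage            = iomap_migrate_page,
#endif
};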
612
613 static void
614 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
615 {
616         loff_t i_size = i_size_read(inode);
617
618         /*
619          * Only truncate newly allocated pages beyond EOF, even if the
620          * write started inside the existing inode size.
621          */
622         if (pos + len > i_size)
623                 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
624 }
625
626 static int
627 iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
628                 unsigned poff, unsigned plen, unsigned from, unsigned to,
629                 struct iomap *iomap)
630 {
631         struct bio_vec bvec;
632         struct bio bio;
633
634         if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
635                 zero_user_segments(page, poff, from, to, poff + plen);
636                 iomap_set_range_uptodate(page, poff, plen);
637                 return 0;
638         }
639
640         bio_init(&bio, &bvec, 1);
641         bio.bi_opf = REQ_OP_READ;
642         bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
643         bio_set_dev(&bio, iomap->bdev);
644         __bio_add_page(&bio, page, plen, poff);
645         return submit_bio_wait(&bio);
646 }
647
648 static int
649 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
650                 struct page *page, struct iomap *iomap)
651 {
652         struct iomap_page *iop = iomap_page_create(inode, page);
653         loff_t block_size = i_blocksize(inode);
654         loff_t block_start = pos & ~(block_size - 1);
655         loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
656         unsigned from = offset_in_page(pos), to = from + len, poff, plen;
657         int status = 0;
658
659         if (PageUptodate(page))
660                 return 0;
661
662         do {
663                 iomap_adjust_read_range(inode, iop, &block_start,
664                                 block_end - block_start, &poff, &plen);
665                 if (plen == 0)
666                         break;
667
668                 if ((from > poff && from < poff + plen) ||
669                     (to > poff && to < poff + plen)) {
670                         status = iomap_read_page_sync(inode, block_start, page,
671                                         poff, plen, from, to, iomap);
672                         if (status)
673                                 break;
674                 }
675
676         } while ((block_start += plen) < block_end);
677
678         return status;
679 }
680
681 static int
682 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
683                 struct page **pagep, struct iomap *iomap)
684 {
685         pgoff_t index = pos >> PAGE_SHIFT;
686         struct page *page;
687         int status = 0;
688
689         BUG_ON(pos + len > iomap->offset + iomap->length);
690
691         if (fatal_signal_pending(current))
692                 return -EINTR;
693
694         page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
695         if (!page)
696                 return -ENOMEM;
697
698         if (iomap->type == IOMAP_INLINE)
699                 iomap_read_inline_data(inode, page, iomap);
700         else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
701                 status = __block_write_begin_int(page, pos, len, NULL, iomap);
702         else
703                 status = __iomap_write_begin(inode, pos, len, page, iomap);
704         if (unlikely(status)) {
705                 unlock_page(page);
706                 put_page(page);
707                 page = NULL;
708
709                 iomap_write_failed(inode, pos, len);
710         }
711
712         *pagep = page;
713         return status;
714 }
715
716 int
717 iomap_set_page_dirty(struct page *page)
718 {
719         struct address_space *mapping = page_mapping(page);
720         int newly_dirty;
721
722         if (unlikely(!mapping))
723                 return !TestSetPageDirty(page);
724
725         /*
726          * Lock out page->mem_cgroup migration to keep PageDirty
727          * synchronized with per-memcg dirty page counters.
728          */
729         lock_page_memcg(page);
730         newly_dirty = !TestSetPageDirty(page);
731         if (newly_dirty)
732                 __set_page_dirty(page, mapping, 0);
733         unlock_page_memcg(page);
734
735         if (newly_dirty)
736                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
737         return newly_dirty;
738 }
739 EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
740
741 static int
742 __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
743                 unsigned copied, struct page *page, struct iomap *iomap)
744 {
745         flush_dcache_page(page);
746
747         /*
748          * The blocks that were entirely written will now be uptodate, so we
749          * don't have to worry about a readpage reading them and overwriting a
750          * partial write.  However if we have encountered a short write and only
751          * partially written into a block, it will not be marked uptodate, so a
752          * readpage might come in and destroy our partial write.
753          *
754          * Do the simplest thing, and just treat any short write to a non
755          * uptodate page as a zero-length write, and force the caller to redo
756          * the whole thing.
757          */
758         if (unlikely(copied < len && !PageUptodate(page))) {
759                 copied = 0;
760         } else {
761                 iomap_set_range_uptodate(page, offset_in_page(pos), len);
762                 iomap_set_page_dirty(page);
763         }
764         return __generic_write_end(inode, pos, copied, page);
765 }
766
767 static int
768 iomap_write_end_inline(struct inode *inode, struct page *page,
769                 struct iomap *iomap, loff_t pos, unsigned copied)
770 {
771         void *addr;
772
773         WARN_ON_ONCE(!PageUptodate(page));
774         BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
775
776         addr = kmap_atomic(page);
777         memcpy(iomap->inline_data + pos, addr + pos, copied);
778         kunmap_atomic(addr);
779
780         mark_inode_dirty(inode);
781         __generic_write_end(inode, pos, copied, page);
782         return copied;
783 }
784
785 static int
786 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
787                 unsigned copied, struct page *page, struct iomap *iomap)
788 {
789         int ret;
790
791         if (iomap->type == IOMAP_INLINE) {
792                 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
793         } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
794                 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
795                                 copied, page, NULL);
796         } else {
797                 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
798         }
799
800         if (iomap->page_done)
801                 iomap->page_done(inode, pos, copied, page, iomap);
802
803         if (ret < len)
804                 iomap_write_failed(inode, pos, len);
805         return ret;
806 }
807
808 static loff_t
809 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
810                 struct iomap *iomap)
811 {
812         struct iov_iter *i = data;
813         long status = 0;
814         ssize_t written = 0;
815         unsigned int flags = AOP_FLAG_NOFS;
816
817         do {
818                 struct page *page;
819                 unsigned long offset;   /* Offset into pagecache page */
820                 unsigned long bytes;    /* Bytes to write to page */
821                 size_t copied;          /* Bytes copied from user */
822
823                 offset = offset_in_page(pos);
824                 bytes = min_t(unsigned long, PAGE_SIZE - offset,
825                                                 iov_iter_count(i));
826 again:
827                 if (bytes > length)
828                         bytes = length;
829
830                 /*
831                  * Bring in the user page that we will copy from _first_.
832                  * Otherwise there's a nasty deadlock on copying from the
833                  * same page as we're writing to, without it being marked
834                  * up-to-date.
835                  *
836                  * Not only is this an optimisation, but it is also required
837                  * to check that the address is actually valid, when atomic
838                  * usercopies are used, below.
839                  */
840                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
841                         status = -EFAULT;
842                         break;
843                 }
844
845                 status = iomap_write_begin(inode, pos, bytes, flags, &page,
846                                 iomap);
847                 if (unlikely(status))
848                         break;
849
850                 if (mapping_writably_mapped(inode->i_mapping))
851                         flush_dcache_page(page);
852
853                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
854
855                 flush_dcache_page(page);
856
857                 status = iomap_write_end(inode, pos, bytes, copied, page,
858                                 iomap);
859                 if (unlikely(status < 0))
860                         break;
861                 copied = status;
862
863                 cond_resched();
864
865                 iov_iter_advance(i, copied);
866                 if (unlikely(copied == 0)) {
867                         /*
868                          * If we were unable to copy any data at all, we must
869                          * fall back to a single segment length write.
870                          *
871                  * If we didn't fall back here, we could livelock
872                          * because not all segments in the iov can be copied at
873                          * once without a pagefault.
874                          */
875                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
876                                                 iov_iter_single_seg_count(i));
877                         goto again;
878                 }
879                 pos += copied;
880                 written += copied;
881                 length -= copied;
882
883                 balance_dirty_pages_ratelimited(inode->i_mapping);
884         } while (iov_iter_count(i) && length);
885
886         return written ? written : status;
887 }
888
889 ssize_t
890 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
891                 const struct iomap_ops *ops)
892 {
893         struct inode *inode = iocb->ki_filp->f_mapping->host;
894         loff_t pos = iocb->ki_pos, ret = 0, written = 0;
895
896         while (iov_iter_count(iter)) {
897                 ret = iomap_apply(inode, pos, iov_iter_count(iter),
898                                 IOMAP_WRITE, ops, iter, iomap_write_actor);
899                 if (ret <= 0)
900                         break;
901                 pos += ret;
902                 written += ret;
903         }
904
905         return written ? written : ret;
906 }
907 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
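/*
 * Sketch of a ->write_iter implementation built on the helper above.  The
 * myfs_* names and myfs_iomap_ops are illustrative assumptions; real
 * filesystems add their own locking and checks around this:
 */
static ssize_t
myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}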
908
909 static struct page *
910 __iomap_read_page(struct inode *inode, loff_t offset)
911 {
912         struct address_space *mapping = inode->i_mapping;
913         struct page *page;
914
915         page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
916         if (IS_ERR(page))
917                 return page;
918         if (!PageUptodate(page)) {
919                 put_page(page);
920                 return ERR_PTR(-EIO);
921         }
922         return page;
923 }
924
925 static loff_t
926 iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
927                 struct iomap *iomap)
928 {
929         long status = 0;
930         ssize_t written = 0;
931
932         do {
933                 struct page *page, *rpage;
934                 unsigned long offset;   /* Offset into pagecache page */
935                 unsigned long bytes;    /* Bytes to write to page */
936
937                 offset = offset_in_page(pos);
938                 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
939
940                 rpage = __iomap_read_page(inode, pos);
941                 if (IS_ERR(rpage))
942                         return PTR_ERR(rpage);
943
944                 status = iomap_write_begin(inode, pos, bytes,
945                                            AOP_FLAG_NOFS, &page, iomap);
946                 put_page(rpage);
947                 if (unlikely(status))
948                         return status;
949
950                 WARN_ON_ONCE(!PageUptodate(page));
951
952                 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
953                 if (unlikely(status <= 0)) {
954                         if (WARN_ON_ONCE(status == 0))
955                                 return -EIO;
956                         return status;
957                 }
958
959                 cond_resched();
960
961                 pos += status;
962                 written += status;
963                 length -= status;
964
965                 balance_dirty_pages_ratelimited(inode->i_mapping);
966         } while (length);
967
968         return written;
969 }
970
971 int
972 iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
973                 const struct iomap_ops *ops)
974 {
975         loff_t ret;
976
977         while (len) {
978                 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
979                                 iomap_dirty_actor);
980                 if (ret <= 0)
981                         return ret;
982                 pos += ret;
983                 len -= ret;
984         }
985
986         return 0;
987 }
988 EXPORT_SYMBOL_GPL(iomap_file_dirty);
989
990 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
991                 unsigned bytes, struct iomap *iomap)
992 {
993         struct page *page;
994         int status;
995
996         status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
997                                    iomap);
998         if (status)
999                 return status;
1000
1001         zero_user(page, offset, bytes);
1002         mark_page_accessed(page);
1003
1004         return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
1005 }
1006
1007 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
1008                 struct iomap *iomap)
1009 {
1010         return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
1011                         iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
1012 }
1013
1014 static loff_t
1015 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
1016                 void *data, struct iomap *iomap)
1017 {
1018         bool *did_zero = data;
1019         loff_t written = 0;
1020         int status;
1021
1022         /* already zeroed?  we're done. */
1023         if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1024                 return count;
1025
1026         do {
1027                 unsigned offset, bytes;
1028
1029                 offset = offset_in_page(pos);
1030                 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
1031
1032                 if (IS_DAX(inode))
1033                         status = iomap_dax_zero(pos, offset, bytes, iomap);
1034                 else
1035                         status = iomap_zero(inode, pos, offset, bytes, iomap);
1036                 if (status < 0)
1037                         return status;
1038
1039                 pos += bytes;
1040                 count -= bytes;
1041                 written += bytes;
1042                 if (did_zero)
1043                         *did_zero = true;
1044         } while (count > 0);
1045
1046         return written;
1047 }
1048
1049 int
1050 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1051                 const struct iomap_ops *ops)
1052 {
1053         loff_t ret;
1054
1055         while (len > 0) {
1056                 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1057                                 ops, did_zero, iomap_zero_range_actor);
1058                 if (ret <= 0)
1059                         return ret;
1060
1061                 pos += ret;
1062                 len -= ret;
1063         }
1064
1065         return 0;
1066 }
1067 EXPORT_SYMBOL_GPL(iomap_zero_range);
1068
1069 int
1070 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1071                 const struct iomap_ops *ops)
1072 {
1073         unsigned int blocksize = i_blocksize(inode);
1074         unsigned int off = pos & (blocksize - 1);
1075
1076         /* Block boundary? Nothing to do */
1077         if (!off)
1078                 return 0;
1079         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1080 }
1081 EXPORT_SYMBOL_GPL(iomap_truncate_page);
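/*
 * A shrinking truncate would typically use the helper above to zero the
 * now-partial tail block before publishing the new size.  Sketch only;
 * myfs_iomap_ops is an assumed filesystem-provided iomap_ops:
 */
static int
myfs_shrink_file(struct inode *inode, loff_t newsize)
{
        int error;

        error = iomap_truncate_page(inode, newsize, NULL, &myfs_iomap_ops);
        if (error)
                return error;

        truncate_setsize(inode, newsize);
        return 0;
}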
1082
1083 static loff_t
1084 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1085                 void *data, struct iomap *iomap)
1086 {
1087         struct page *page = data;
1088         int ret;
1089
1090         if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1091                 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1092                 if (ret)
1093                         return ret;
1094                 block_commit_write(page, 0, length);
1095         } else {
1096                 WARN_ON_ONCE(!PageUptodate(page));
1097                 iomap_page_create(inode, page);
1098                 set_page_dirty(page);
1099         }
1100
1101         return length;
1102 }
1103
1104 int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1105 {
1106         struct page *page = vmf->page;
1107         struct inode *inode = file_inode(vmf->vma->vm_file);
1108         unsigned long length;
1109         loff_t offset, size;
1110         ssize_t ret;
1111
1112         lock_page(page);
1113         size = i_size_read(inode);
1114         if ((page->mapping != inode->i_mapping) ||
1115             (page_offset(page) > size)) {
1116                 /* We overload EFAULT to mean page got truncated */
1117                 ret = -EFAULT;
1118                 goto out_unlock;
1119         }
1120
1121         /* page is wholly or partially inside EOF */
1122         if (((page->index + 1) << PAGE_SHIFT) > size)
1123                 length = offset_in_page(size);
1124         else
1125                 length = PAGE_SIZE;
1126
1127         offset = page_offset(page);
1128         while (length > 0) {
1129                 ret = iomap_apply(inode, offset, length,
1130                                 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1131                                 iomap_page_mkwrite_actor);
1132                 if (unlikely(ret <= 0))
1133                         goto out_unlock;
1134                 offset += ret;
1135                 length -= ret;
1136         }
1137
1138         wait_for_stable_page(page);
1139         return VM_FAULT_LOCKED;
1140 out_unlock:
1141         unlock_page(page);
1142         return block_page_mkwrite_return(ret);
1143 }
1144 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
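/*
 * Sketch of the vm_operations_struct wiring for the helper above, modelled
 * on how existing users call it (the myfs_* name and myfs_iomap_ops are
 * illustrative assumptions):
 */
static vm_fault_t
myfs_page_mkwrite(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
        sb_end_pagefault(inode->i_sb);
        return ret;
}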
1145
1146 struct fiemap_ctx {
1147         struct fiemap_extent_info *fi;
1148         struct iomap prev;
1149 };
1150
1151 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
1152                 struct iomap *iomap, u32 flags)
1153 {
1154         switch (iomap->type) {
1155         case IOMAP_HOLE:
1156                 /* skip holes */
1157                 return 0;
1158         case IOMAP_DELALLOC:
1159                 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
1160                 break;
1161         case IOMAP_MAPPED:
1162                 break;
1163         case IOMAP_UNWRITTEN:
1164                 flags |= FIEMAP_EXTENT_UNWRITTEN;
1165                 break;
1166         case IOMAP_INLINE:
1167                 flags |= FIEMAP_EXTENT_DATA_INLINE;
1168                 break;
1169         }
1170
1171         if (iomap->flags & IOMAP_F_MERGED)
1172                 flags |= FIEMAP_EXTENT_MERGED;
1173         if (iomap->flags & IOMAP_F_SHARED)
1174                 flags |= FIEMAP_EXTENT_SHARED;
1175
1176         return fiemap_fill_next_extent(fi, iomap->offset,
1177                         iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
1178                         iomap->length, flags);
1179 }
1180
1181 static loff_t
1182 iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1183                 struct iomap *iomap)
1184 {
1185         struct fiemap_ctx *ctx = data;
1186         loff_t ret = length;
1187
1188         if (iomap->type == IOMAP_HOLE)
1189                 return length;
1190
1191         ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
1192         ctx->prev = *iomap;
1193         switch (ret) {
1194         case 0:         /* success */
1195                 return length;
1196         case 1:         /* extent array full */
1197                 return 0;
1198         default:
1199                 return ret;
1200         }
1201 }
1202
1203 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
1204                 loff_t start, loff_t len, const struct iomap_ops *ops)
1205 {
1206         struct fiemap_ctx ctx;
1207         loff_t ret;
1208
1209         memset(&ctx, 0, sizeof(ctx));
1210         ctx.fi = fi;
1211         ctx.prev.type = IOMAP_HOLE;
1212
1213         ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
1214         if (ret)
1215                 return ret;
1216
1217         if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
1218                 ret = filemap_write_and_wait(inode->i_mapping);
1219                 if (ret)
1220                         return ret;
1221         }
1222
1223         while (len > 0) {
1224                 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
1225                                 iomap_fiemap_actor);
1226                 /* inode with no (attribute) mapping will give ENOENT */
1227                 if (ret == -ENOENT)
1228                         break;
1229                 if (ret < 0)
1230                         return ret;
1231                 if (ret == 0)
1232                         break;
1233
1234                 start += ret;
1235                 len -= ret;
1236         }
1237
1238         if (ctx.prev.type != IOMAP_HOLE) {
1239                 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
1240                 if (ret < 0)
1241                         return ret;
1242         }
1243
1244         return 0;
1245 }
1246 EXPORT_SYMBOL_GPL(iomap_fiemap);
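/*
 * A filesystem's ->fiemap inode operation can forward straight to the
 * helper above (myfs_iomap_ops is an assumed filesystem-provided
 * iomap_ops):
 */
static int
myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                u64 start, u64 len)
{
        return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
}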
1247
1248 /*
1249  * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
1250  * Returns true if found and updates @lastoff to the offset in file.
1251  */
1252 static bool
1253 page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
1254                 int whence)
1255 {
1256         const struct address_space_operations *ops = inode->i_mapping->a_ops;
1257         unsigned int bsize = i_blocksize(inode), off;
1258         bool seek_data = whence == SEEK_DATA;
1259         loff_t poff = page_offset(page);
1260
1261         if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
1262                 return false;
1263
1264         if (*lastoff < poff) {
1265                 /*
1266                  * Last offset smaller than the start of the page means we found
1267                  * a hole:
1268                  */
1269                 if (whence == SEEK_HOLE)
1270                         return true;
1271                 *lastoff = poff;
1272         }
1273
1274         /*
1275          * Just check the page unless we can and should check block ranges:
1276          */
1277         if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
1278                 return PageUptodate(page) == seek_data;
1279
1280         lock_page(page);
1281         if (unlikely(page->mapping != inode->i_mapping))
1282                 goto out_unlock_not_found;
1283
1284         for (off = 0; off < PAGE_SIZE; off += bsize) {
1285                 if (offset_in_page(*lastoff) >= off + bsize)
1286                         continue;
1287                 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
1288                         unlock_page(page);
1289                         return true;
1290                 }
1291                 *lastoff = poff + off + bsize;
1292         }
1293
1294 out_unlock_not_found:
1295         unlock_page(page);
1296         return false;
1297 }
1298
1299 /*
1300  * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
1301  *
1302  * Within unwritten extents, the page cache determines which parts are holes
1303  * and which are data: uptodate buffer heads count as data; everything else
1304  * counts as a hole.
1305  *
1306  * Returns the resulting offset on success, and -ENOENT otherwise.
1307  */
1308 static loff_t
1309 page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
1310                 int whence)
1311 {
1312         pgoff_t index = offset >> PAGE_SHIFT;
1313         pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1314         loff_t lastoff = offset;
1315         struct pagevec pvec;
1316
1317         if (length <= 0)
1318                 return -ENOENT;
1319
1320         pagevec_init(&pvec);
1321
1322         do {
1323                 unsigned nr_pages, i;
1324
1325                 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
1326                                                 end - 1);
1327                 if (nr_pages == 0)
1328                         break;
1329
1330                 for (i = 0; i < nr_pages; i++) {
1331                         struct page *page = pvec.pages[i];
1332
1333                         if (page_seek_hole_data(inode, page, &lastoff, whence))
1334                                 goto check_range;
1335                         lastoff = page_offset(page) + PAGE_SIZE;
1336                 }
1337                 pagevec_release(&pvec);
1338         } while (index < end);
1339
1340         /* If there is no page at lastoff and we are not done, we found a hole. */
1341         if (whence != SEEK_HOLE)
1342                 goto not_found;
1343
1344 check_range:
1345         if (lastoff < offset + length)
1346                 goto out;
1347 not_found:
1348         lastoff = -ENOENT;
1349 out:
1350         pagevec_release(&pvec);
1351         return lastoff;
1352 }
1353
1354
1355 static loff_t
1356 iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
1357                       void *data, struct iomap *iomap)
1358 {
1359         switch (iomap->type) {
1360         case IOMAP_UNWRITTEN:
1361                 offset = page_cache_seek_hole_data(inode, offset, length,
1362                                                    SEEK_HOLE);
1363                 if (offset < 0)
1364                         return length;
1365                 /* fall through */
1366         case IOMAP_HOLE:
1367                 *(loff_t *)data = offset;
1368                 return 0;
1369         default:
1370                 return length;
1371         }
1372 }
1373
1374 loff_t
1375 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1376 {
1377         loff_t size = i_size_read(inode);
1378         loff_t length = size - offset;
1379         loff_t ret;
1380
1381         /* Nothing to be found before or beyond the end of the file. */
1382         if (offset < 0 || offset >= size)
1383                 return -ENXIO;
1384
1385         while (length > 0) {
1386                 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1387                                   &offset, iomap_seek_hole_actor);
1388                 if (ret < 0)
1389                         return ret;
1390                 if (ret == 0)
1391                         break;
1392
1393                 offset += ret;
1394                 length -= ret;
1395         }
1396
1397         return offset;
1398 }
1399 EXPORT_SYMBOL_GPL(iomap_seek_hole);
1400
1401 static loff_t
1402 iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1403                       void *data, struct iomap *iomap)
1404 {
1405         switch (iomap->type) {
1406         case IOMAP_HOLE:
1407                 return length;
1408         case IOMAP_UNWRITTEN:
1409                 offset = page_cache_seek_hole_data(inode, offset, length,
1410                                                    SEEK_DATA);
1411                 if (offset < 0)
1412                         return length;
1413                 /* fall through */
1414         default:
1415                 *(loff_t *)data = offset;
1416                 return 0;
1417         }
1418 }
1419
1420 loff_t
1421 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1422 {
1423         loff_t size = i_size_read(inode);
1424         loff_t length = size - offset;
1425         loff_t ret;
1426
1427         /* Nothing to be found before or beyond the end of the file. */
1428         if (offset < 0 || offset >= size)
1429                 return -ENXIO;
1430
1431         while (length > 0) {
1432                 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1433                                   &offset, iomap_seek_data_actor);
1434                 if (ret < 0)
1435                         return ret;
1436                 if (ret == 0)
1437                         break;
1438
1439                 offset += ret;
1440                 length -= ret;
1441         }
1442
1443         if (length <= 0)
1444                 return -ENXIO;
1445         return offset;
1446 }
1447 EXPORT_SYMBOL_GPL(iomap_seek_data);
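/*
 * Sketch of ->llseek wiring for the two helpers above; whence values other
 * than SEEK_HOLE/SEEK_DATA fall back to the generic code (the myfs_* name
 * and myfs_iomap_ops are illustrative assumptions):
 */
static loff_t
myfs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file_inode(file);

        switch (whence) {
        case SEEK_HOLE:
                offset = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
                break;
        case SEEK_DATA:
                offset = iomap_seek_data(inode, offset, &myfs_iomap_ops);
                break;
        default:
                return generic_file_llseek(file, offset, whence);
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}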
1448
1449 /*
1450  * Private flags for iomap_dio, must not overlap with the public ones in
1451  * iomap.h:
1452  */
1453 #define IOMAP_DIO_WRITE_FUA     (1 << 28)
1454 #define IOMAP_DIO_NEED_SYNC     (1 << 29)
1455 #define IOMAP_DIO_WRITE         (1 << 30)
1456 #define IOMAP_DIO_DIRTY         (1 << 31)
1457
1458 struct iomap_dio {
1459         struct kiocb            *iocb;
1460         iomap_dio_end_io_t      *end_io;
1461         loff_t                  i_size;
1462         loff_t                  size;
1463         atomic_t                ref;
1464         unsigned                flags;
1465         int                     error;
1466         bool                    wait_for_completion;
1467
1468         union {
1469                 /* used during submission and for synchronous completion: */
1470                 struct {
1471                         struct iov_iter         *iter;
1472                         struct task_struct      *waiter;
1473                         struct request_queue    *last_queue;
1474                         blk_qc_t                cookie;
1475                 } submit;
1476
1477                 /* used for aio completion: */
1478                 struct {
1479                         struct work_struct      work;
1480                 } aio;
1481         };
1482 };
1483
1484 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1485 {
1486         struct kiocb *iocb = dio->iocb;
1487         struct inode *inode = file_inode(iocb->ki_filp);
1488         loff_t offset = iocb->ki_pos;
1489         ssize_t ret;
1490
1491         if (dio->end_io) {
1492                 ret = dio->end_io(iocb,
1493                                 dio->error ? dio->error : dio->size,
1494                                 dio->flags);
1495         } else {
1496                 ret = dio->error;
1497         }
1498
1499         if (likely(!ret)) {
1500                 ret = dio->size;
1501                 /* check for short read */
1502                 if (offset + ret > dio->i_size &&
1503                     !(dio->flags & IOMAP_DIO_WRITE))
1504                         ret = dio->i_size - offset;
1505                 iocb->ki_pos += ret;
1506         }
1507
1508         /*
1509          * Try again to invalidate clean pages which might have been cached by
1510          * non-direct readahead, or faulted in by get_user_pages() if the source
1511          * of the write was an mmap'ed region of the file we're writing.  Either
1512          * one is a pretty crazy thing to do, so we don't support it 100%.  If
1513          * this invalidation fails, tough, the write still worked...
1514          *
1515          * And this page cache invalidation has to be after dio->end_io(), as
1516          * some filesystems convert unwritten extents to real allocations in
1517          * end_io() when necessary, otherwise a racing buffer read would cache
1518          * zeros from unwritten extents.
1519          */
1520         if (!dio->error &&
1521             (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1522                 int err;
1523                 err = invalidate_inode_pages2_range(inode->i_mapping,
1524                                 offset >> PAGE_SHIFT,
1525                                 (offset + dio->size - 1) >> PAGE_SHIFT);
1526                 if (err)
1527                         dio_warn_stale_pagecache(iocb->ki_filp);
1528         }
1529
1530         /*
1531          * If this is a DSYNC write, make sure we push it to stable storage now
1532          * that we've written data.
1533          */
1534         if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1535                 ret = generic_write_sync(iocb, ret);
1536
1537         inode_dio_end(file_inode(iocb->ki_filp));
1538         kfree(dio);
1539
1540         return ret;
1541 }
1542
1543 static void iomap_dio_complete_work(struct work_struct *work)
1544 {
1545         struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1546         struct kiocb *iocb = dio->iocb;
1547
1548         iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
1549 }
1550
1551 /*
1552  * Set an error in the dio if none is set yet.  We have to use cmpxchg
1553  * as the submission context and the completion context(s) can race to
1554  * update the error.
1555  */
1556 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1557 {
1558         cmpxchg(&dio->error, 0, ret);
1559 }
1560
1561 static void iomap_dio_bio_end_io(struct bio *bio)
1562 {
1563         struct iomap_dio *dio = bio->bi_private;
1564         bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1565
1566         if (bio->bi_status)
1567                 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
1568
1569         if (atomic_dec_and_test(&dio->ref)) {
1570                 if (dio->wait_for_completion) {
1571                         struct task_struct *waiter = dio->submit.waiter;
1572                         WRITE_ONCE(dio->submit.waiter, NULL);
1573                         wake_up_process(waiter);
1574                 } else if (dio->flags & IOMAP_DIO_WRITE) {
1575                         struct inode *inode = file_inode(dio->iocb->ki_filp);
1576
1577                         INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1578                         queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1579                 } else {
1580                         iomap_dio_complete_work(&dio->aio.work);
1581                 }
1582         }
1583
1584         if (should_dirty) {
1585                 bio_check_pages_dirty(bio);
1586         } else {
1587                 struct bio_vec *bvec;
1588                 int i;
1589
1590                 bio_for_each_segment_all(bvec, bio, i)
1591                         put_page(bvec->bv_page);
1592                 bio_put(bio);
1593         }
1594 }
1595
1596 static blk_qc_t
1597 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1598                 unsigned len)
1599 {
1600         struct page *page = ZERO_PAGE(0);
1601         struct bio *bio;
1602
1603         bio = bio_alloc(GFP_KERNEL, 1);
1604         bio_set_dev(bio, iomap->bdev);
1605         bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
1606         bio->bi_private = dio;
1607         bio->bi_end_io = iomap_dio_bio_end_io;
1608
1609         get_page(page);
1610         __bio_add_page(bio, page, len, 0);
1611         bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
1612
1613         atomic_inc(&dio->ref);
1614         return submit_bio(bio);
1615 }
1616
1617 static loff_t
1618 iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1619                 struct iomap_dio *dio, struct iomap *iomap)
1620 {
1621         unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1622         unsigned int fs_block_size = i_blocksize(inode), pad;
1623         unsigned int align = iov_iter_alignment(dio->submit.iter);
1624         struct iov_iter iter;
1625         struct bio *bio;
1626         bool need_zeroout = false;
1627         bool use_fua = false;
1628         int nr_pages, ret = 0;
1629         size_t copied = 0;
1630
1631         if ((pos | length | align) & ((1 << blkbits) - 1))
1632                 return -EINVAL;
1633
1634         if (iomap->type == IOMAP_UNWRITTEN) {
1635                 dio->flags |= IOMAP_DIO_UNWRITTEN;
1636                 need_zeroout = true;
1637         }
1638
1639         if (iomap->flags & IOMAP_F_SHARED)
1640                 dio->flags |= IOMAP_DIO_COW;
1641
1642         if (iomap->flags & IOMAP_F_NEW) {
1643                 need_zeroout = true;
1644         } else if (iomap->type == IOMAP_MAPPED) {
1645                 /*
1646                  * Use a FUA write if we need datasync semantics, this is a pure
1647                  * data IO that doesn't require any metadata updates (including
1648                  * after IO completion such as unwritten extent conversion), and
1649                  * the underlying device supports FUA. This allows us to avoid
1650                  * cache flushes on IO completion.
1651                  */
1652                 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1653                     (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1654                     blk_queue_fua(bdev_get_queue(iomap->bdev)))
1655                         use_fua = true;
1656         }
1657
1658         /*
1659          * Operate on a partial iter trimmed to the extent we were called for.
1660          * We'll update the iter in the dio once we're done with this extent.
1661          */
1662         iter = *dio->submit.iter;
1663         iov_iter_truncate(&iter, length);
1664
1665         nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1666         if (nr_pages <= 0)
1667                 return nr_pages;
1668
1669         if (need_zeroout) {
1670                 /* zero out from the start of the block to the write offset */
1671                 pad = pos & (fs_block_size - 1);
1672                 if (pad)
1673                         iomap_dio_zero(dio, iomap, pos - pad, pad);
1674         }
1675
1676         do {
1677                 size_t n;
1678                 if (dio->error) {
1679                         iov_iter_revert(dio->submit.iter, copied);
1680                         return 0;
1681                 }
1682
1683                 bio = bio_alloc(GFP_KERNEL, nr_pages);
1684                 bio_set_dev(bio, iomap->bdev);
1685                 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
1686                 bio->bi_write_hint = dio->iocb->ki_hint;
1687                 bio->bi_ioprio = dio->iocb->ki_ioprio;
1688                 bio->bi_private = dio;
1689                 bio->bi_end_io = iomap_dio_bio_end_io;
1690
1691                 ret = bio_iov_iter_get_pages(bio, &iter);
1692                 if (unlikely(ret)) {
1693                         /*
1694                          * We have to stop part way through an IO. We must fall
1695                          * through to the sub-block tail zeroing here, otherwise
1696                          * this short IO may expose stale data in the tail of
1697                          * the block we haven't written data to.
1698                          */
1699                         bio_put(bio);
1700                         goto zero_tail;
1701                 }
1702
1703                 n = bio->bi_iter.bi_size;
1704                 if (dio->flags & IOMAP_DIO_WRITE) {
1705                         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1706                         if (use_fua)
1707                                 bio->bi_opf |= REQ_FUA;
1708                         else
1709                                 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
1710                         task_io_account_write(n);
1711                 } else {
1712                         bio->bi_opf = REQ_OP_READ;
1713                         if (dio->flags & IOMAP_DIO_DIRTY)
1714                                 bio_set_pages_dirty(bio);
1715                 }
1716
1717                 iov_iter_advance(dio->submit.iter, n);
1718
1719                 dio->size += n;
1720                 pos += n;
1721                 copied += n;
1722
1723                 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1724
1725                 atomic_inc(&dio->ref);
1726
1727                 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1728                 dio->submit.cookie = submit_bio(bio);
1729         } while (nr_pages);
1730
1731         /*
1732          * We need to zero out the tail of a sub-block write if the extent type
1733          * requires zeroing or the write extends beyond EOF. If we don't zero
1734          * the block tail in the latter case, we can expose stale data via mmap
1735          * reads of the EOF block.
1736          */
1737 zero_tail:
1738         if (need_zeroout ||
1739             ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
1740                 /* zero out from the end of the write to the end of the block */
1741                 pad = pos & (fs_block_size - 1);
1742                 if (pad)
1743                         iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1744         }
1745         return copied ? copied : ret;
1746 }
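
/*
 * Illustrative sketch (not part of this file): a worked example of the
 * head/tail zeroing above, assuming a 4096-byte filesystem block and a
 * 512-byte logical block device.  For a write into a newly allocated
 * (IOMAP_F_NEW) extent at pos = 0x1200 for length = 0x600:
 *
 *   head: pad = 0x1200 & 0xfff = 0x200
 *         -> iomap_dio_zero() zeroes 0x1000..0x11ff
 *   data: 0x1200..0x17ff is written, leaving pos = 0x1800
 *   tail: pad = 0x1800 & 0xfff = 0x800
 *         -> iomap_dio_zero() zeroes 0x1000 - 0x800 = 0x800 bytes,
 *            i.e. 0x1800..0x1fff
 *
 * so the whole block 0x1000..0x1fff is initialised and a racing mmap read
 * of that block cannot observe stale on-disk contents.
 */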
1747
1748 static loff_t
1749 iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1750 {
1751         length = iov_iter_zero(length, dio->submit.iter);
1752         dio->size += length;
1753         return length;
1754 }
1755
1756 static loff_t
1757 iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1758                 struct iomap_dio *dio, struct iomap *iomap)
1759 {
1760         struct iov_iter *iter = dio->submit.iter;
1761         size_t copied;
1762
1763         BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1764
1765         if (dio->flags & IOMAP_DIO_WRITE) {
1766                 loff_t size = inode->i_size;
1767
1768                 if (pos > size)
1769                         memset(iomap->inline_data + size, 0, pos - size);
1770                 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1771                 if (copied) {
1772                         if (pos + copied > size)
1773                                 i_size_write(inode, pos + copied);
1774                         mark_inode_dirty(inode);
1775                 }
1776         } else {
1777                 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1778         }
1779         dio->size += copied;
1780         return copied;
1781 }
1782
1783 static loff_t
1784 iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1785                 void *data, struct iomap *iomap)
1786 {
1787         struct iomap_dio *dio = data;
1788
1789         switch (iomap->type) {
1790         case IOMAP_HOLE:
1791                 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1792                         return -EIO;
1793                 return iomap_dio_hole_actor(length, dio);
1794         case IOMAP_UNWRITTEN:
1795                 if (!(dio->flags & IOMAP_DIO_WRITE))
1796                         return iomap_dio_hole_actor(length, dio);
1797                 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1798         case IOMAP_MAPPED:
1799                 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1800         case IOMAP_INLINE:
1801                 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
1802         default:
1803                 WARN_ON_ONCE(1);
1804                 return -EIO;
1805         }
1806 }
1807
1808 /*
1809  * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
1810  * is being issued as AIO or not.  This allows us to optimise pure data writes
1811  * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1812  * REQ_FLUSH post write. This is slightly tricky because a single request here
1813  * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1814  * may be pure data writes. In that case, we still need to do a full data sync
1815  * completion.
1816  */
1817 ssize_t
1818 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1819                 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
1820 {
1821         struct address_space *mapping = iocb->ki_filp->f_mapping;
1822         struct inode *inode = file_inode(iocb->ki_filp);
1823         size_t count = iov_iter_count(iter);
1824         loff_t pos = iocb->ki_pos, start = pos;
1825         loff_t end = iocb->ki_pos + count - 1, ret = 0;
1826         unsigned int flags = IOMAP_DIRECT;
1827         bool wait_for_completion = is_sync_kiocb(iocb);
1828         struct blk_plug plug;
1829         struct iomap_dio *dio;
1830
1831         lockdep_assert_held(&inode->i_rwsem);
1832
1833         if (!count)
1834                 return 0;
1835
1836         dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1837         if (!dio)
1838                 return -ENOMEM;
1839
1840         dio->iocb = iocb;
1841         atomic_set(&dio->ref, 1);
1842         dio->size = 0;
1843         dio->i_size = i_size_read(inode);
1844         dio->end_io = end_io;
1845         dio->error = 0;
1846         dio->flags = 0;
1847
1848         dio->submit.iter = iter;
1849         dio->submit.waiter = current;
1850         dio->submit.cookie = BLK_QC_T_NONE;
1851         dio->submit.last_queue = NULL;
1852
1853         if (iov_iter_rw(iter) == READ) {
1854                 if (pos >= dio->i_size)
1855                         goto out_free_dio;
1856
1857                 if (iter->type == ITER_IOVEC)
1858                         dio->flags |= IOMAP_DIO_DIRTY;
1859         } else {
1860                 flags |= IOMAP_WRITE;
1861                 dio->flags |= IOMAP_DIO_WRITE;
1862
1863                 /* for data sync or sync, we need sync completion processing */
1864                 if (iocb->ki_flags & IOCB_DSYNC)
1865                         dio->flags |= IOMAP_DIO_NEED_SYNC;
1866
1867                 /*
1868                  * For datasync only writes, we optimistically try using FUA for
1869                  * this IO.  Any non-FUA write that occurs will clear this flag,
1870                  * hence we know before completion whether a cache flush is
1871                  * necessary.
1872                  */
1873                 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1874                         dio->flags |= IOMAP_DIO_WRITE_FUA;
1875         }
1876
1877         if (iocb->ki_flags & IOCB_NOWAIT) {
1878                 if (filemap_range_has_page(mapping, start, end)) {
1879                         ret = -EAGAIN;
1880                         goto out_free_dio;
1881                 }
1882                 flags |= IOMAP_NOWAIT;
1883         }
1884
1885         ret = filemap_write_and_wait_range(mapping, start, end);
1886         if (ret)
1887                 goto out_free_dio;
1888
1889         /*
1890          * Try to invalidate cache pages for the range we're direct
1891          * writing.  If this invalidation fails, tough, the write will
1892          * still work, but racing two incompatible write paths is a
1893          * pretty crazy thing to do, so we don't support it 100%.
1894          */
1895         ret = invalidate_inode_pages2_range(mapping,
1896                         start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1897         if (ret)
1898                 dio_warn_stale_pagecache(iocb->ki_filp);
1899         ret = 0;
1900
1901         if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
1902             !inode->i_sb->s_dio_done_wq) {
1903                 ret = sb_init_dio_done_wq(inode->i_sb);
1904                 if (ret < 0)
1905                         goto out_free_dio;
1906         }
1907
1908         inode_dio_begin(inode);
1909
1910         blk_start_plug(&plug);
1911         do {
1912                 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1913                                 iomap_dio_actor);
1914                 if (ret <= 0) {
1915                         /* magic error code to fall back to buffered I/O */
1916                         if (ret == -ENOTBLK) {
1917                                 wait_for_completion = true;
1918                                 ret = 0;
1919                         }
1920                         break;
1921                 }
1922                 pos += ret;
1923
1924                 if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
1925                         /*
1926                          * We only report that we've read data up to i_size.
1927                          * Revert iter to a state corresponding to that as
1928                          * some callers (such as splice code) rely on it.
1929                          */
1930                         iov_iter_revert(iter, pos - dio->i_size);
1931                         break;
1932                 }
1933         } while ((count = iov_iter_count(iter)) > 0);
1934         blk_finish_plug(&plug);
1935
1936         if (ret < 0)
1937                 iomap_dio_set_error(dio, ret);
1938
1939         /*
1940          * If all the writes we issued were FUA, we don't need to flush the
1941          * cache on IO completion. Clear the sync flag for this case.
1942          */
1943         if (dio->flags & IOMAP_DIO_WRITE_FUA)
1944                 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1945
1946         /*
1947          * We are about to drop our additional submission reference, which
1948          * might be the last reference to the dio.  There are three
1949          * different ways we can progress here:
1950          *
1951          *  (a) If this is the last reference we will always complete and free
1952          *      the dio ourselves.
1953          *  (b) If this is not the last reference, and we serve an asynchronous
1954          *      iocb, we must never touch the dio after the decrement, the
1955          *      I/O completion handler will complete and free it.
1956          *  (c) If this is not the last reference, but we serve a synchronous
1957          *      iocb, the I/O completion handler will wake us up on the drop
1958          *      of the final reference, and we will complete and free it here
1959          *      after we got woken by the I/O completion handler.
1960          */
1961         dio->wait_for_completion = wait_for_completion;
1962         if (!atomic_dec_and_test(&dio->ref)) {
1963                 if (!wait_for_completion)
1964                         return -EIOCBQUEUED;
1965
1966                 for (;;) {
1967                         set_current_state(TASK_UNINTERRUPTIBLE);
1968                         if (!READ_ONCE(dio->submit.waiter))
1969                                 break;
1970
1971                         if (!(iocb->ki_flags & IOCB_HIPRI) ||
1972                             !dio->submit.last_queue ||
1973                             !blk_poll(dio->submit.last_queue,
1974                                          dio->submit.cookie))
1975                                 io_schedule();
1976                 }
1977                 __set_current_state(TASK_RUNNING);
1978         }
1979
1980         return iomap_dio_complete(dio);
1981
1982 out_free_dio:
1983         kfree(dio);
1984         return ret;
1985 }
1986 EXPORT_SYMBOL_GPL(iomap_dio_rw);
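
/*
 * Illustrative sketch (not part of this file): roughly how a filesystem's
 * ->read_iter() method might drive iomap_dio_rw() for O_DIRECT reads.
 * "examplefs_iomap_ops" is a hypothetical iomap_ops instance; real callers
 * (e.g. XFS) supply their own ops and locking.  iomap_dio_rw() asserts that
 * the caller holds inode->i_rwsem, hence the shared lock taken here.
 */
static ssize_t examplefs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!iov_iter_count(to))
                return 0;       /* nothing to read */

        inode_lock_shared(inode);
        /* no end_io callback: a read has nothing to convert on completion */
        ret = iomap_dio_rw(iocb, to, &examplefs_iomap_ops, NULL);
        inode_unlock_shared(inode);

        return ret;
}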
1987
1988 /* Swapfile activation */
1989
1990 #ifdef CONFIG_SWAP
1991 struct iomap_swapfile_info {
1992         struct iomap iomap;             /* accumulated iomap */
1993         struct swap_info_struct *sis;
1994         uint64_t lowest_ppage;          /* lowest physical addr seen (pages) */
1995         uint64_t highest_ppage;         /* highest physical addr seen (pages) */
1996         unsigned long nr_pages;         /* number of pages collected */
1997         int nr_extents;                 /* extent count */
1998 };
1999
2000 /*
2001  * Collect physical extents for this swap file.  Physical extents reported to
2002  * the swap code must be trimmed to align to a page boundary.  The logical
2003  * offset within the file is irrelevant since the swapfile code maps logical
2004  * page numbers of the swap device to the physical page-aligned extents.
2005  */
2006 static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
2007 {
2008         struct iomap *iomap = &isi->iomap;
2009         unsigned long nr_pages;
2010         uint64_t first_ppage;
2011         uint64_t first_ppage_reported;
2012         uint64_t next_ppage;
2013         int error;
2014
2015         /*
2016          * Round the start up and the end down so that the physical
2017          * extent aligns to a page boundary.
2018          */
2019         first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
2020         next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
2021                         PAGE_SHIFT;
2022
2023         /* Skip too-short physical extents. */
2024         if (first_ppage >= next_ppage)
2025                 return 0;
2026         nr_pages = next_ppage - first_ppage;
2027
2028         /*
2029          * Calculate how much swap space we're adding; the first page contains
2030          * the swap header and doesn't count.  The mm still wants that first
2031          * page fed to add_swap_extent, however.
2032          */
2033         first_ppage_reported = first_ppage;
2034         if (iomap->offset == 0)
2035                 first_ppage_reported++;
2036         if (isi->lowest_ppage > first_ppage_reported)
2037                 isi->lowest_ppage = first_ppage_reported;
2038         if (isi->highest_ppage < (next_ppage - 1))
2039                 isi->highest_ppage = next_ppage - 1;
2040
2041         /* Add extent, set up for the next call. */
2042         error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
2043         if (error < 0)
2044                 return error;
2045         isi->nr_extents += error;
2046         isi->nr_pages += nr_pages;
2047         return 0;
2048 }
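
/*
 * Illustrative sketch (not part of this file): with 4096-byte pages, an
 * accumulated iomap with addr = 0x1200 and length = 0x3000 rounds to
 *
 *   first_ppage = ALIGN(0x1200, 0x1000) >> 12      = 2
 *   next_ppage  = ALIGN_DOWN(0x4200, 0x1000) >> 12 = 4
 *
 * so only the two fully contained physical pages (2 and 3) are handed to
 * add_swap_extent(); the partial pages at either end are dropped, and an
 * extent shorter than one full page is skipped entirely.
 */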
2049
2050 /*
2051  * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
2052  * swap only cares about contiguous page-aligned physical extents and makes no
2053  * distinction between written and unwritten extents.
2054  */
2055 static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
2056                 loff_t count, void *data, struct iomap *iomap)
2057 {
2058         struct iomap_swapfile_info *isi = data;
2059         int error;
2060
2061         switch (iomap->type) {
2062         case IOMAP_MAPPED:
2063         case IOMAP_UNWRITTEN:
2064                 /* Only real or unwritten extents. */
2065                 break;
2066         case IOMAP_INLINE:
2067                 /* No inline data. */
2068                 pr_err("swapon: file is inline\n");
2069                 return -EINVAL;
2070         default:
2071                 pr_err("swapon: file has unallocated extents\n");
2072                 return -EINVAL;
2073         }
2074
2075         /* No uncommitted metadata or shared blocks. */
2076         if (iomap->flags & IOMAP_F_DIRTY) {
2077                 pr_err("swapon: file is not committed\n");
2078                 return -EINVAL;
2079         }
2080         if (iomap->flags & IOMAP_F_SHARED) {
2081                 pr_err("swapon: file has shared extents\n");
2082                 return -EINVAL;
2083         }
2084
2085         /* Only one bdev per swap file. */
2086         if (iomap->bdev != isi->sis->bdev) {
2087                 pr_err("swapon: file is on multiple devices\n");
2088                 return -EINVAL;
2089         }
2090
2091         if (isi->iomap.length == 0) {
2092                 /* No accumulated extent, so just store it. */
2093                 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2094         } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2095                 /* Append this to the accumulated extent. */
2096                 isi->iomap.length += iomap->length;
2097         } else {
2098                 /* Otherwise, add the retained iomap and store this one. */
2099                 error = iomap_swapfile_add_extent(isi);
2100                 if (error)
2101                         return error;
2102                 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2103         }
2104         return count;
2105 }
2106
2107 /*
2108  * Iterate a swap file's iomaps to construct physical extents that can be
2109  * passed to the swapfile subsystem.
2110  */
2111 int iomap_swapfile_activate(struct swap_info_struct *sis,
2112                 struct file *swap_file, sector_t *pagespan,
2113                 const struct iomap_ops *ops)
2114 {
2115         struct iomap_swapfile_info isi = {
2116                 .sis = sis,
2117                 .lowest_ppage = (sector_t)-1ULL,
2118         };
2119         struct address_space *mapping = swap_file->f_mapping;
2120         struct inode *inode = mapping->host;
2121         loff_t pos = 0;
2122         loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2123         loff_t ret;
2124
2125         /*
2126          * Persist all file mapping metadata so that we won't have any
2127          * IOMAP_F_DIRTY iomaps.
2128          */
2129         ret = vfs_fsync(swap_file, 1);
2130         if (ret)
2131                 return ret;
2132
2133         while (len > 0) {
2134                 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2135                                 ops, &isi, iomap_swapfile_activate_actor);
2136                 if (ret <= 0)
2137                         return ret;
2138
2139                 pos += ret;
2140                 len -= ret;
2141         }
2142
2143         if (isi.iomap.length) {
2144                 ret = iomap_swapfile_add_extent(&isi);
2145                 if (ret)
2146                         return ret;
2147         }
2148
2149         *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2150         sis->max = isi.nr_pages;
2151         sis->pages = isi.nr_pages - 1;
2152         sis->highest_bit = isi.nr_pages - 1;
2153         return isi.nr_extents;
2154 }
2155 EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
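
/*
 * Illustrative sketch (not part of this file): a filesystem would typically
 * wire this up as its ->swap_activate() address_space operation.
 * "examplefs_iomap_ops" is hypothetical; the only filesystem-specific work
 * is pointing sis->bdev at the backing device and supplying the iomap ops.
 */
static int examplefs_swap_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *span)
{
        /* assumes a simple single-device filesystem */
        sis->bdev = file_inode(swap_file)->i_sb->s_bdev;
        return iomap_swapfile_activate(sis, swap_file, span,
                        &examplefs_iomap_ops);
}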
2156 #endif /* CONFIG_SWAP */
2157
2158 static loff_t
2159 iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2160                 void *data, struct iomap *iomap)
2161 {
2162         sector_t *bno = data, addr;
2163
2164         if (iomap->type == IOMAP_MAPPED) {
2165                 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2166                 if (addr > INT_MAX)
2167                         WARN(1, "would truncate bmap result\n");
2168                 else
2169                         *bno = addr;
2170         }
2171         return 0;
2172 }
2173
2174 /* legacy ->bmap interface.  0 is the error return (!) */
2175 sector_t
2176 iomap_bmap(struct address_space *mapping, sector_t bno,
2177                 const struct iomap_ops *ops)
2178 {
2179         struct inode *inode = mapping->host;
2180         loff_t pos = bno << inode->i_blkbits;
2181         unsigned blocksize = i_blocksize(inode);
2182
2183         if (filemap_write_and_wait(mapping))
2184                 return 0;
2185
2186         bno = 0;
2187         iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2188         return bno;
2189 }
2190 EXPORT_SYMBOL_GPL(iomap_bmap);
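
/*
 * Illustrative sketch (not part of this file): iomap_bmap() is meant to back
 * a filesystem's ->bmap() address_space operation, e.g. for the FIBMAP
 * ioctl.  "examplefs_iomap_ops" is hypothetical; remember that 0 is the
 * "no mapping / error" return of this legacy interface, not success.
 */
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
        return iomap_bmap(mapping, block, &examplefs_iomap_ops);
}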