// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "btree_iter.h"
#include "fs-io-pagecache.h"

#include <linux/pagevec.h>
#include <linux/writeback.h>
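/*
 * Get a contiguous run of folios from the pagecache starting at @start,
 * appending them to @fs; stop at the first gap. FGP_CREAT is dropped once
 * we're more than 1MB past @start so a single call stays bounded.
 */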
int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
				     loff_t start, u64 end,
				     fgf_t fgp_flags, gfp_t gfp,
		if ((u64) pos >= (u64) start + (1ULL << 20))
			fgp_flags &= ~FGP_CREAT;

		ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);

		f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
		if (IS_ERR_OR_NULL(f))
			break;

		BUG_ON(fs->nr && folio_pos(f) != pos);

		pos = folio_end_pos(f);

	if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
		ret = -ENOMEM;

	return fs->nr ? 0 : ret;
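/*
 * Write back and then invalidate the given pagecache range, retrying while
 * invalidate_inode_pages2_range() returns -EBUSY:
 */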
/* pagecache_block must be held */
int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
					    loff_t start, loff_t end)
	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
		if (!mapping->nrpages)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
	} while (ret == -EBUSY);

/* Useful for debug tracing: */
static const char * const bch2_folio_sector_states[] = {
	BCH_FOLIO_SECTOR_STATE()
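/*
 * Per-sector state transitions: dirtying takes unallocated -> dirty and
 * reserved -> dirty_reserved; undirtying and reserving are the corresponding
 * inverse transitions. States not listed are left unchanged.
 */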
static inline enum bch_folio_sector_state
folio_sector_dirty(enum bch_folio_sector_state state)
	case SECTOR_unallocated:
		return SECTOR_dirty;
	case SECTOR_reserved:
		return SECTOR_dirty_reserved;

static inline enum bch_folio_sector_state
folio_sector_undirty(enum bch_folio_sector_state state)
	case SECTOR_dirty:
		return SECTOR_unallocated;
	case SECTOR_dirty_reserved:
		return SECTOR_reserved;

static inline enum bch_folio_sector_state
folio_sector_reserve(enum bch_folio_sector_state state)
	case SECTOR_unallocated:
		return SECTOR_reserved;
	case SECTOR_dirty:
		return SECTOR_dirty_reserved;
/* for newly allocated folios: */
struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
	s = kzalloc(sizeof(*s) +
		    sizeof(struct bch_folio_sector) *
		    folio_sectors(folio), gfp);

	spin_lock_init(&s->lock);
	folio_attach_private(folio, s);

struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
	return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
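/* Map an extent key to the sector state it implies for the range it covers: */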
static unsigned bkey_to_sector_state(struct bkey_s_c k)
	if (bkey_extent_is_reservation(k))
		return SECTOR_reserved;
	if (bkey_extent_is_allocation(k.k))
		return SECTOR_allocated;
	return SECTOR_unallocated;

static void __bch2_folio_set(struct folio *folio,
			     unsigned pg_offset, unsigned pg_len,
			     unsigned nr_ptrs, unsigned state)
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, sectors = folio_sectors(folio);

	BUG_ON(pg_offset >= sectors);
	BUG_ON(pg_offset + pg_len > sectors);

	for (i = pg_offset; i < pg_offset + pg_len; i++) {
		s->s[i].nr_replicas = nr_ptrs;
		bch2_folio_sector_set(folio, s, i, state);

	spin_unlock(&s->lock);
/*
 * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
 * extents btree:
 */
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
		   struct folio **fs, unsigned nr_folios)
	struct btree_trans *trans;
	struct btree_iter iter;
	u64 offset = folio_sector(fs[0]);
	bool need_set = false;

	for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
		s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
		need_set |= !s->uptodate;

	trans = bch2_trans_get(c);

	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
				     SPOS(inum.inum, offset, snapshot),
				     BTREE_ITER_SLOTS, k, ret) {
		unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
		unsigned state = bkey_to_sector_state(k);

		while (folio_idx < nr_folios) {
			struct folio *folio = fs[folio_idx];
			u64 folio_start = folio_sector(folio);
			u64 folio_end = folio_end_sector(folio);
			unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
				folio_start;
			unsigned folio_len = min(k.k->p.offset, folio_end) -
				folio_offset - folio_start;
			BUG_ON(k.k->p.offset < folio_start);
			BUG_ON(bkey_start_offset(k.k) > folio_end);

			if (!bch2_folio(folio)->uptodate)
				__bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);

			if (k.k->p.offset < folio_end)

		if (folio_idx == nr_folios)

	offset = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
	bch2_trans_put(trans);
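/*
 * Set bch_folio sector state for each folio in a bio from the extent key the
 * bio was issued against; reflink pointers count as zero replicas here:
 */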
void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
	struct bvec_iter iter;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = bkey_to_sector_state(k);

	bio_for_each_folio(fv, bio, iter)
		__bch2_folio_set(fv.fv_folio,
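/*
 * Walk the folios in the pagecache covering [start, end) (in sectors) and
 * clear their per-sector nr_replicas, marking that range as unallocated as
 * far as pagecache state is concerned:
 */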
void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			u64 folio_start = folio_sector(folio);
			u64 folio_end = folio_end_sector(folio);
			unsigned folio_offset = max(start, folio_start) - folio_start;
			unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;

			BUG_ON(end <= folio_start);

			s = bch2_folio(folio);

			for (j = folio_offset; j < folio_offset + folio_len; j++)
				s->s[j].nr_replicas = 0;
			spin_unlock(&s->lock);

		folio_batch_release(&fbatch);
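/*
 * Move pagecache sectors in [*start, end) to their reserved state, tracking
 * how many dirty sectors become dirty_reserved so the change can be accounted
 * via bch2_i_sectors_acct(); *start is advanced as folios are processed:
 */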
int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;
	s64 i_sectors_delta = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			else if (!folio_trylock(folio)) {
				folio_batch_release(&fbatch);

			u64 folio_start = folio_sector(folio);
			u64 folio_end = folio_end_sector(folio);

			BUG_ON(end <= folio_start);

			*start = min(end, folio_end);

			struct bch_folio *s = bch2_folio(folio);
			unsigned folio_offset = max(*start, folio_start) - folio_start;
			unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;

			for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
				i_sectors_delta -= s->s[j].state == SECTOR_dirty;
				bch2_folio_sector_set(folio, s, j,
						      folio_sector_reserve(s->s[j].state));
			spin_unlock(&s->lock);

		folio_batch_release(&fbatch);

	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
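/*
 * Number of additional replicas we still need to reserve space for, beyond
 * what this sector already has allocated and reserved:
 */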
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
					  unsigned nr_replicas)
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
int bch2_get_folio_disk_reservation(struct bch_fs *c,
				    struct bch_inode_info *inode,
				    struct folio *folio, bool check_enospc)
	struct bch_folio *s = bch2_folio_create(folio, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;

	for (i = 0; i < sectors; i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					? BCH_DISK_RESERVATION_NOFAIL

	for (i = 0; i < sectors; i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);
void bch2_folio_reservation_put(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct bch2_folio_reservation *res)
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
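/*
 * Take disk and quota reservations for a write to [offset, offset + len)
 * within @folio; if the quota reservation fails, the disk reservation taken
 * here is released again:
 */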
int bch2_folio_reservation_get(struct bch_fs *c,
			       struct bch_inode_info *inode,
			       struct bch2_folio_reservation *res,
			       unsigned offset, unsigned len)
	struct bch_folio *s = bch2_folio_create(folio, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;

	BUG_ON(!s->uptodate);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
		disk_sectors += sectors_to_reserve(&s->s[i],
						   res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_unallocated;

	ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);

	ret = bch2_quota_reservation_add(c, inode, &res->quota,
					 quota_sectors, true);
		struct disk_reservation tmp = {
			.sectors = disk_sectors

		bch2_disk_reservation_put(c, &tmp);
		res->disk.sectors -= disk_sectors;
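/*
 * Tear down a folio's bch_folio state when it's dropped from the pagecache:
 * give back any disk reservation its sectors still hold, undirty the sectors
 * (adjusting i_sectors accordingly), and release the private state:
 */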
static void bch2_clear_folio_bits(struct folio *folio)
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_folio *s = bch2_folio(folio);
	struct disk_reservation disk_res = { 0 };
	int i, sectors = folio_sectors(folio), dirty_sectors = 0;

	EBUG_ON(!folio_test_locked(folio));
	EBUG_ON(folio_test_writeback(folio));

	for (i = 0; i < sectors; i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		dirty_sectors -= s->s[i].state == SECTOR_dirty;
		bch2_folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));

	bch2_disk_reservation_put(c, &disk_res);

	bch2_i_sectors_acct(c, inode, NULL, dirty_sectors);

	bch2_folio_release(folio);
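/*
 * Dirty part of a folio: transfer reservation from @res to the affected
 * sectors, move them to their dirty state, account newly dirtied sectors to
 * i_sectors, and mark the folio dirty in the pagecache:
 */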
void bch2_set_folio_dirty(struct bch_fs *c,
			  struct bch_inode_info *inode,
			  struct bch2_folio_reservation *res,
			  unsigned offset, unsigned len)
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) folio_pos(folio) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	BUG_ON(!s->uptodate);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
		unsigned sectors = sectors_to_reserve(&s->s[i],
						      res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		dirty_sectors += s->s[i].state == SECTOR_unallocated;

		bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));

	spin_unlock(&s->lock);

	bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!folio_test_dirty(folio))
		filemap_dirty_folio(inode->v.i_mapping, folio);
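/*
 * Fault handler: if faults_disabled_mapping() is set, we're faulting from a
 * dio path that holds pagecache_block on another mapping; to avoid a lock
 * inversion we may drop and re-take that lock, signalling the dropped lock
 * with set_fdm_dropped_locks() and returning VM_FAULT_SIGBUS so the caller
 * can retry:
 */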
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);

		return VM_FAULT_SIGBUS;

		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(inode))

		bch2_pagecache_block_put(fdm_host);

		bch2_pagecache_add_get(inode);
		bch2_pagecache_add_put(inode);

		bch2_pagecache_block_get(fdm_host);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;

	bch2_pagecache_add_get(inode);
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(inode);
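/*
 * mkwrite handler: re-check that the folio is still attached to this mapping
 * and within i_size, initialize its bch_folio state and take a reservation
 * for the whole folio, then mark it dirty and return it locked:
 */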
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;

	bch2_folio_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * bch2_write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a bch2_write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(inode);

	isize = i_size_read(&inode->v);

	if (folio->mapping != mapping || folio_pos(folio) >= isize) {
		ret = VM_FAULT_NOPAGE;

	len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));

	if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
	    bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
		ret = VM_FAULT_SIGBUS;

	bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
	bch2_folio_reservation_put(c, inode, &res);

	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;

	bch2_pagecache_add_put(inode);
	sb_end_pagefault(inode->v.i_sb);
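/* Only tear down bch_folio state if the entire folio is being invalidated: */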
void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
	if (offset || length < folio_size(folio))
		return;

	bch2_clear_folio_bits(folio);
bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;

	bch2_clear_folio_bits(folio);
	return true;
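/*
 * Return the byte offset within @folio of the first sector at or after @pos
 * that holds data (dirty or allocated) with at least @min_replicas replicas,
 * or a negative value if no such sector exists:
 */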
static int folio_data_offset(struct folio *folio, loff_t pos,
			     unsigned min_replicas)
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, sectors = folio_sectors(folio);

	for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
		if (s->s[i].state >= SECTOR_dirty &&
		    s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
			return i << SECTOR_SHIFT;
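/*
 * Scan the pagecache between start_offset and end_offset for data that may
 * not have been written back yet (SEEK_DATA support); the result is clamped
 * to [start_offset, end_offset]:
 */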
loff_t bch2_seek_pagecache_data(struct inode *vinode,
				unsigned min_replicas,
	struct folio_batch fbatch;
	pgoff_t start_index = start_offset >> PAGE_SHIFT;
	pgoff_t end_index = end_offset >> PAGE_SHIFT;
	pgoff_t index = start_index;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(vinode->i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			} else if (!folio_trylock(folio)) {
				folio_batch_release(&fbatch);

			offset = folio_data_offset(folio,
						   max(folio_pos(folio), start_offset),

				ret = clamp(folio_pos(folio) + offset,
					    start_offset, end_offset);
				folio_batch_release(&fbatch);

		folio_batch_release(&fbatch);
/*
 * Search for a hole in a folio.
 *
 * The filemap layer returns -ENOENT if no folio exists, so reuse the same error
 * code to indicate a pagecache hole exists at the returned offset. Otherwise
 * return 0 if the folio is filled with data, or an error code. This function
 * can return -EAGAIN if nonblock is specified.
 */
static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
			     unsigned min_replicas, bool nonblock)
	folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
				    FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	s = bch2_folio(folio);

	sectors = folio_sectors(folio);
	for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
		if (s->s[i].state < SECTOR_dirty ||
		    s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
			*offset = max(*offset,
				      folio_pos(folio) + (i << SECTOR_SHIFT));

	*offset = folio_end_pos(folio);
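/*
 * Walk forward from start_offset, calling folio_hole_offset() until it
 * reports a hole (-ENOENT, meaning no folio or an unwritten sector) or we
 * pass end_offset; the result is clamped to end_offset:
 */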
loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				unsigned min_replicas,
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset;

	while (!ret && offset < end_offset)
		ret = folio_hole_offset(mapping, &offset, min_replicas, nonblock);
	if (ret && ret != -ENOENT)
		return ret;

	return min(offset, end_offset);
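/*
 * Shrink a candidate hole, given in sectors, so that it doesn't overlap data
 * that exists only in the pagecache: the start is advanced past pagecache
 * data and the end is pulled back to the next pagecache data:
 */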
int bch2_clamp_data_hole(struct inode *inode,
			 unsigned min_replicas,
	ret = bch2_seek_pagecache_hole(inode,
			*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;

	if (*hole_start == *hole_end)

	ret = bch2_seek_pagecache_data(inode,
			*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;

#endif /* NO_BCACHEFS_FS */