// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as 64K page would ensure
 *   all nodesize would fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem.  For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block will only trigger the read for the
 *   needed range, while other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A       \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which we only go the
	 * subpage routine for if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

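/*
 * Illustrative summary of the checks above (a sketch, assuming a 64K page
 * system):
 *
 *   sectorsize >= PAGE_SIZE        -> never subpage
 *   data inode page or no mapping  -> subpage
 *   metadata, nodesize < PAGE_SIZE -> subpage
 *   metadata, nodesize >= PAGE_SIZE -> not subpage
 */
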
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->error_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}

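/*
 * Worked example (illustrative): with a 64K page and 4K sectorsize,
 * nr_bits = 16 and the packed layout becomes:
 *
 *	uptodate:  bits  0-15
 *	error:     bits 16-31
 *	dirty:     bits 32-47
 *	writeback: bits 48-63
 *	ordered:   bits 64-79
 *	checked:   bits 80-95
 *
 * total_nr_bits = 96, which fits in two unsigned longs on 64-bit.
 */
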
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the page already has private attached */
	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	attach_page_private(page, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
		return;

	subpage = detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

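/*
 * Illustrative sizing: continuing the 4K-sector/64K-page example above,
 * total_nr_bits = 96 gives BITS_TO_LONGS(96) = 2, so struct_size() asks
 * for sizeof(struct btrfs_subpage) plus two unsigned longs (16 bytes on
 * 64-bit) of trailing bitmap storage.
 */
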
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * And please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want the atomic_sub_and_test()
	 * to always be executed.
	 */
	if (is_data && last)
		unlock_page(page);
}

static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, page_offset(page), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (page_offset(page) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

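/*
 * Worked example (illustrative): for a page at file offset 64K
 * (page_offset() == 65536) on a 64K page system, clamping the range
 * [60K, 60K + 8K) yields *start = 64K and *len = 4K.  Clamping a range
 * that ends at or before 64K yields *len = 0, which the subpage helpers
 * treat as a no-op.
 */
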
void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

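/*
 * Illustrative: with 4K sectors, locking a 16K range adds 4 to
 * subpage->writers; the matching btrfs_subpage_end_and_test_writer()
 * call subtracts those 4 and returns true only when the counter reaches
 * zero, i.e. when the last writer of the page is done.
 */
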
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * find_get_pages_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
		lock_page(page);
		return 0;
	}
	lock_page(page);
	if (!PagePrivate(page) || !page->private) {
		unlock_page(page);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
	return 0;
}

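/*
 * Typical calling pattern (a sketch, following the doc comment above):
 *
 *	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
 *	if (ret == -EAGAIN) {
 *		(page got invalidated, release it and retry the lookup)
 *	}
 *	(... delalloc writeback work ...)
 *	btrfs_page_end_writer_lock(fs_info, page, start, len);
 */
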
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
}

static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
				      unsigned int nbits)
{
	unsigned int found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	if (found_zero == start + nbits)
		return true;
	return false;
}

static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
				       unsigned int nbits)
{
	unsigned int found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	if (found_set == start + nbits)
		return true;
	return false;
}

#define subpage_calc_start_bit(fs_info, page, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

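/*
 * Worked example (illustrative): with 4K sectorsize, for the "dirty"
 * bitmap (offset 32 in the layout described at btrfs_init_subpage_info())
 * and @start pointing at byte 8K inside the page:
 *
 *	start_bit = (8192 >> 12) + 32 = 34
 */
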
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}

/*
 * Extra clear_and_test function for subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for true case, as we have
 * extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(PageWriteback(page));
		end_page_writeback(page);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		ClearPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		SetPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which is dependent on each page status, for test all
 * bits are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

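/*
 * For example, the expansions above provide btrfs_subpage_test_dirty(),
 * which returns true only if every sector in [start, start + len) has its
 * dirty bit set in the subpage bitmap.
 */
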
/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
				 test_page_func)			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}									\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
			 PageOrdered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);

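/*
 * The clamp variants are safe to call with a range extending beyond the
 * current page, e.g. when one delalloc range covers several pages:
 * btrfs_subpage_clamp_range() first trims [start, start + len) to the
 * page boundaries (possibly down to len == 0, a no-op), then the regular
 * subpage helper is applied.
 */
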
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!PageDirty(page));
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages() or extent_write_full_page().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
			      u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(PageLocked(page));
	/* For the non-subpage case, we just unlock the page */
	if (!btrfs_is_subpage(fs_info, page))
		return unlock_page(page);

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers number.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0)
		/* No writers, locked by plain lock_page() */
		return unlock_page(page);

	/* Have writers, use proper subpage helper to end it */
	btrfs_page_end_writer_lock(fs_info, page, start, len);
}