// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
13 #include "../extent_io.h"
14 #include "../btrfs_inode.h"
#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
					unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
				end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, ret %d\n",
				start, end, ret);
			break;
		}
	}

	return count;
}
#define STATE_FLAG_STR_LEN		256

#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})
static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}
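
/* Dump every extent_state in the given io tree, one line per range. */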
static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg("  start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}
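
/*
 * Exercise find_lock_delalloc_range() against a set of dirty pages covering
 * two maximum-sized extents, checking that the returned range and the page
 * lock state match what each search scenario is expected to produce.
 */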
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at its maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}
	tmp = &BTRFS_I(inode)->io_tree;

	/*
	 * Passing NULL as we don't have fs_info but tracepoints are not used
	 * at this point
	 */
	extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);

	/*
	 * First go through and create and mark all of our pages dirty, we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}
	/* Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	unlock_page(locked_page);
	put_page(locked_page);
	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu, end %llu",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	/* locked_page was unlocked above */
	put_page(locked_page);
	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start && end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}
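
/*
 * Compare every bit of an extent buffer against a regular memory bitmap that
 * received the same updates, using both forms of the bit test helper as well
 * as raw byte reads, and report the first mismatch.
 */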
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i;

	for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i, i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i / BITS_PER_BYTE, i % BITS_PER_BYTE,
				 i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}
	}

	return 0;
}
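
/*
 * Apply the same set/clear operation to the reference bitmap and to the
 * extent buffer, then verify they still agree bit for bit.
 */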
static int test_bitmap_set(const char *name, unsigned long *bitmap,
			   struct extent_buffer *eb,
			   unsigned long byte_start, unsigned long bit_start,
			   unsigned long bit_len)
{
	int ret;

	bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

static int test_bitmap_clear(const char *name, unsigned long *bitmap,
			     struct extent_buffer *eb,
			     unsigned long byte_start, unsigned long bit_start,
			     unsigned long bit_len)
{
	int ret;

	bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	unsigned long byte_len = eb->len;
	u32 x = 0;
	int ret;

	ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
	if (ret < 0)
		return ret;

	/* Straddling pages test */
	if (byte_len > PAGE_SIZE) {
		ret = test_bitmap_set("cross page set", bitmap, eb,
				      PAGE_SIZE - sizeof(long) / 2, 0,
				      sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
				      byte_len * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_clear("cross page clear", bitmap, eb,
					PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for case where the tree block is sectorsize aligned but
	 * not nodesize aligned.
	 */
	eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}
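
/*
 * Exercise find_first_clear_extent_bit() on an io tree with several ranges
 * marked CHUNK_TRIMMED/CHUNK_ALLOCATED, including searches in holes, inside
 * set ranges, and beyond the last known range.
 */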
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);

	/* Test correct handling of empty tree */
	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}
	/*
	 * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between
	 * 4M-32M
	 */
	set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	/*
	 * Request first hole starting at 12M, we should get 4M-32M
	 */
	find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of allocated range, should get the next one
	 * available, which happens to be unallocated -> 4M-32M
	 */
	find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
	 * being unset in this range, we should get the entry in range 64M-72M
	 */
	set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
	find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
				    CHUNK_TRIMMED);

	/*
	 * Search in the middle of set range whose immediate neighbour doesn't
	 * have the bits set so it must be returned
	 */
	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range, shall return after last known range
	 * and end should be -1
	 */
	find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
	"error handling beyond end of range search: start %llu end %llu",
			 start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}
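
/*
 * Helpers for the memory operation tests: compare an extent buffer with a
 * plain memory buffer that received the same operations and dump the first
 * differing byte on mismatch.
 */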
static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
					const char *test_name)
{
	for (int i = 0; i < eb->len; i++) {
		struct page *page = eb->pages[i >> PAGE_SHIFT];
		void *addr = page_address(page) + offset_in_page(i);

		if (memcmp(addr, memory + i, 1) != 0) {
			test_err("%s failed", test_name);
			test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
				 i, *(u8 *)addr, *(u8 *)(memory + i));
			break;
		}
	}
}

static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
				const char *test_name)
{
	for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
		void *eb_addr = page_address(eb->pages[i]);

		if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
			dump_eb_and_memory_contents(eb, memory, test_name);
			return -EUCLEAN;
		}
	}
	return 0;
}

/*
 * Init both memory and extent buffer contents to the same randomly generated
 * contents.
 */
static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
{
	get_random_bytes(memory, eb->len);
	write_extent_buffer(eb, memory, 0, eb->len);
}
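
/*
 * Exercise write/memcpy/memmove on an extent buffer against the same
 * operations on a plain buffer, both within one page and across page
 * boundaries when nodesize > PAGE_SIZE.
 */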
static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct extent_buffer *eb = NULL;
	void *memory = NULL;
	int ret;

	test_msg("running extent buffer memory operation tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	memory = kvzalloc(nodesize, GFP_KERNEL);
	if (!memory) {
		test_err("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	init_eb_and_memory(eb, memory);
	ret = verify_eb_and_memory(eb, memory, "full eb write");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 16, 16);
	memcpy_extent_buffer(eb, 0, 16, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 16);
	memcpy_extent_buffer(eb, 0, 2048, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 2048);
	memcpy_extent_buffer(eb, 0, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 256, 512);
	memmove_extent_buffer(eb, 512, 256, 512);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memmove(memory + 2048, memory + 512, 2048);
	memmove_extent_buffer(eb, 2048, 512, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 2048, 2048);
	memmove_extent_buffer(eb, 512, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
	if (ret < 0)
		goto out;

	if (nodesize > PAGE_SIZE) {
		memcpy(memory, memory + 4096 - 128, 256);
		memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
		memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
		memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
		memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
	}
out:
	free_extent_buffer(eb);
	kvfree(memory);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}
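
/* Entry point: run all extent I/O self tests, stopping at the first failure. */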
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_eb_mem_ops(sectorsize, nodesize);
out:
	return ret;
}