// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of ext4 multiblock allocation.
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>

#include "ext4.h"

struct mbt_grp_ctx {
        struct buffer_head bitmap_bh;
        /* desc and gd_bh are just placeholders for now */
        struct ext4_group_desc desc;
        struct buffer_head gd_bh;
};

struct mbt_ctx {
        struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
        struct ext4_super_block es;
        struct ext4_sb_info sbi;
        struct mbt_ctx mbt_ctx;
};

#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
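
/*
 * Minimal in-memory "filesystem" used to host the allocator under test.
 * The superblock comes from sget() on this dummy file_system_type and is
 * torn down with generic_shutdown_super(); no real block device is touched.
 */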
static const struct super_operations mbt_sops = {
};

static void mbt_kill_sb(struct super_block *sb)
{
        generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
        .name = "mballoc test",
        .kill_sb = mbt_kill_sb,
};
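
/*
 * Prepare just enough of the super_block for ext4_mb_init(): a fake bdev
 * and request queue, an empty inode list for the buddy cache inode, and
 * the free/dirty cluster counters.
 */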
static int mbt_mb_init(struct super_block *sb)
{
        ext4_fsblk_t block;
        int ret;

        /* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
        sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
        if (sb->s_bdev == NULL)
                return -ENOMEM;

        sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
        if (sb->s_bdev->bd_queue == NULL) {
                kfree(sb->s_bdev);
                return -ENOMEM;
        }

        /*
         * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
         * new_inode(sb);
         */
        INIT_LIST_HEAD(&sb->s_inodes);
        spin_lock_init(&sb->s_inode_list_lock);

        ret = ext4_mb_init(sb);
        if (ret != 0)
                goto err_out;

        block = ext4_count_free_clusters(sb);
        ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
                                  GFP_KERNEL);
        if (ret != 0)
                goto err_mb_release;

        ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
                                  GFP_KERNEL);
        if (ret != 0)
                goto err_freeclusters;

        return 0;

err_freeclusters:
        percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
        ext4_mb_release(sb);
err_out:
        kfree(sb->s_bdev->bd_queue);
        kfree(sb->s_bdev);
        return ret;
}

static void mbt_mb_release(struct super_block *sb)
{
        percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
        percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
        ext4_mb_release(sb);
        kfree(sb->s_bdev->bd_queue);
        kfree(sb->s_bdev);
}

static int mbt_set(struct super_block *sb, void *data)
{
        return 0;
}

static struct super_block *mbt_ext4_alloc_super_block(void)
{
        struct mbt_ext4_super_block *fsb;
        struct super_block *sb;
        struct ext4_sb_info *sbi;

        fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
        if (fsb == NULL)
                return NULL;

        sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
        if (IS_ERR(sb))
                goto out;

        sbi = &fsb->sbi;

        sbi->s_blockgroup_lock =
                kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
        if (!sbi->s_blockgroup_lock)
                goto out_deactivate;

        bgl_lock_init(sbi->s_blockgroup_lock);

        sbi->s_es = &fsb->es;
        sb->s_fs_info = sbi;

        up_write(&sb->s_umount);
        return sb;

out_deactivate:
        deactivate_locked_super(sb);
out:
        kfree(fsb);
        return NULL;
}

static void mbt_ext4_free_super_block(struct super_block *sb)
{
        struct mbt_ext4_super_block *fsb = MBT_SB(sb);
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        kfree(sbi->s_blockgroup_lock);
        deactivate_super(sb);
        kfree(fsb);
}
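
/* Block/cluster geometry used to parameterize each test case. */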
struct mbt_ext4_block_layout {
        unsigned char blocksize_bits;
        unsigned int cluster_bits;
        uint32_t blocks_per_group;
        ext4_group_t group_count;
        uint16_t desc_size;
};

static void mbt_init_sb_layout(struct super_block *sb,
                               struct mbt_ext4_block_layout *layout)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;

        sb->s_blocksize = 1UL << layout->blocksize_bits;
        sb->s_blocksize_bits = layout->blocksize_bits;

        sbi->s_groups_count = layout->group_count;
        sbi->s_blocks_per_group = layout->blocks_per_group;
        sbi->s_cluster_bits = layout->cluster_bits;
        sbi->s_cluster_ratio = 1U << layout->cluster_bits;
        sbi->s_clusters_per_group = layout->blocks_per_group >>
                                    layout->cluster_bits;
        sbi->s_desc_size = layout->desc_size;
        sbi->s_desc_per_block_bits =
                sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
        sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

        es->s_first_data_block = cpu_to_le32(0);
        es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
                                            layout->group_count);
}
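
/*
 * Allocate an in-memory block bitmap for one group. Bits past the last
 * cluster are pre-set so the allocator never hands out the padding area,
 * and the group descriptor starts with every cluster free.
 */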
static int mbt_grp_ctx_init(struct super_block *sb,
                            struct mbt_grp_ctx *grp_ctx)
{
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

        grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
        if (grp_ctx->bitmap_bh.b_data == NULL)
                return -ENOMEM;
        mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
        ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

        return 0;
}

static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
        kfree(grp_ctx->bitmap_bh.b_data);
        grp_ctx->bitmap_bh.b_data = NULL;
}

static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
                              unsigned int start, unsigned int len)
{
        struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

        mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
        struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

        return grp_ctx->bitmap_bh.b_data;
}

/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
        struct mbt_ctx *ctx = MBT_CTX(sb);
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);

        ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
                               GFP_KERNEL);
        if (ctx->grp_ctx == NULL)
                return -ENOMEM;

        for (i = 0; i < ngroups; i++)
                if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
                        goto out;

        /*
         * The first data block (the first cluster in the first group) is used
         * by metadata. Mark it in use so the allocator never hands out the
         * first block, which would fail the ext4_sb_block_valid check.
         */
        mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
        ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
                                     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

        return 0;
out:
        while (i-- > 0)
                mbt_grp_ctx_release(&ctx->grp_ctx[i]);
        kfree(ctx->grp_ctx);
        return -ENOMEM;
}

static void mbt_ctx_release(struct super_block *sb)
{
        struct mbt_ctx *ctx = MBT_CTX(sb);
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);

        for (i = 0; i < ngroups; i++)
                mbt_grp_ctx_release(&ctx->grp_ctx[i]);
        kfree(ctx->grp_ctx);
}
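
/*
 * KUnit static stubs: redirect ext4's block bitmap and group descriptor
 * reads to the in-memory mbt_grp_ctx above instead of doing real metadata
 * I/O.
 */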
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
                                   bool ignore_locked)
{
        struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

        /* paired with brelse from caller of ext4_read_block_bitmap_nowait */
        get_bh(&grp_ctx->bitmap_bh);
        return &grp_ctx->bitmap_bh;
}

static int ext4_wait_block_bitmap_stub(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct buffer_head *bh)
{
        /*
         * The real ext4_wait_block_bitmap will set these flags, and
         * functions like ext4_mb_init_cache will verify them.
         */
        set_buffer_uptodate(bh);
        set_bitmap_uptodate(bh);
        set_buffer_verified(bh);
        return 0;
}

static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
                         struct buffer_head **bh)
{
        struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

        if (bh != NULL)
                *bh = &grp_ctx->gd_bh;

        return &grp_ctx->desc;
}

static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
                          ext4_group_t group, ext4_grpblk_t blkoff,
                          ext4_grpblk_t len, int flags,
                          ext4_grpblk_t *ret_changed)
{
        struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
        struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

        if (state)
                mb_set_bits(bitmap_bh->b_data, blkoff, len);
        else
                mb_clear_bits(bitmap_bh->b_data, blkoff, len);

        return 0;
}
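
/*
 * Per-test fixture: every case allocates from TEST_GOAL_GROUP. The init
 * path builds the fake superblock, installs the stubs above, and only then
 * runs ext4_mb_init() so the buddy cache is built from the mocked bitmaps.
 */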
#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
        struct mbt_ext4_block_layout *layout =
                (struct mbt_ext4_block_layout *)(test->param_value);
        struct super_block *sb;
        int ret;

        sb = mbt_ext4_alloc_super_block();
        if (sb == NULL)
                return -ENOMEM;

        mbt_init_sb_layout(sb, layout);

        ret = mbt_ctx_init(sb);
        if (ret != 0) {
                mbt_ext4_free_super_block(sb);
                return ret;
        }

        test->priv = sb;
        kunit_activate_static_stub(test,
                                   ext4_read_block_bitmap_nowait,
                                   ext4_read_block_bitmap_nowait_stub);
        kunit_activate_static_stub(test,
                                   ext4_wait_block_bitmap,
                                   ext4_wait_block_bitmap_stub);
        kunit_activate_static_stub(test,
                                   ext4_get_group_desc,
                                   ext4_get_group_desc_stub);
        kunit_activate_static_stub(test,
                                   ext4_mb_mark_context,
                                   ext4_mb_mark_context_stub);

        /* the stub functions will be called from mbt_mb_init->ext4_mb_init */
        if (mbt_mb_init(sb) != 0) {
                mbt_ctx_release(sb);
                mbt_ext4_free_super_block(sb);
                return -ENOMEM;
        }

        return 0;
}

static void mbt_kunit_exit(struct kunit *test)
{
        struct super_block *sb = (struct super_block *)test->priv;

        mbt_mb_release(sb);
        mbt_ctx_release(sb);
        mbt_ext4_free_super_block(sb);
}
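
/*
 * Walk ext4_mb_new_blocks_simple() through its fallback order: hit the
 * goal, take the next free cluster in the goal group, spill into the next
 * group, fall back to group 0, and finally fail once every group is full.
 */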
static void test_new_blocks_simple(struct kunit *test)
{
        struct super_block *sb = (struct super_block *)test->priv;
        struct inode *inode;
        struct ext4_allocation_request ar;
        ext4_group_t i, goal_group = TEST_GOAL_GROUP;
        int err = 0;
        ext4_fsblk_t found;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
        if (!inode)
                return;

        inode->i_sb = sb;
        ar.inode = inode;

        /* get block at goal */
        ar.goal = ext4_group_first_block_no(sb, goal_group);
        found = ext4_mb_new_blocks_simple(&ar, &err);
        KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
                "failed to alloc block at goal, expected %llu found %llu",
                ar.goal, found);

        /* get block after goal in goal group */
        ar.goal = ext4_group_first_block_no(sb, goal_group);
        found = ext4_mb_new_blocks_simple(&ar, &err);
        KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
                "failed to alloc block after goal in goal group, expected %llu found %llu",
                ar.goal + EXT4_C2B(sbi, 1), found);

        /* get block after goal group */
        mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
        ar.goal = ext4_group_first_block_no(sb, goal_group);
        found = ext4_mb_new_blocks_simple(&ar, &err);
        KUNIT_ASSERT_EQ_MSG(test,
                ext4_group_first_block_no(sb, goal_group + 1), found,
                "failed to alloc block after goal group, expected %llu found %llu",
                ext4_group_first_block_no(sb, goal_group + 1), found);

        /* get block before goal group */
        for (i = goal_group; i < ext4_get_groups_count(sb); i++)
                mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
        ar.goal = ext4_group_first_block_no(sb, goal_group);
        found = ext4_mb_new_blocks_simple(&ar, &err);
        KUNIT_ASSERT_EQ_MSG(test,
                ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
                "failed to alloc block before goal group, expected %llu found %llu",
                ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

        /* no block available, fail to allocate block */
        for (i = 0; i < ext4_get_groups_count(sb); i++)
                mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
        ar.goal = ext4_group_first_block_no(sb, goal_group);
        found = ext4_mb_new_blocks_simple(&ar, &err);
        KUNIT_ASSERT_NE_MSG(test, err, 0,
                "unexpectedly got a block when none is available");
}

#define TEST_RANGE_COUNT 8

struct test_range {
        ext4_grpblk_t start;
        ext4_grpblk_t len;
};

static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
                         int count)
{
        ext4_grpblk_t start, len, max;
        int i;

        max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
        for (i = 0; i < count; i++) {
                start = get_random_u32() % max;
                len = get_random_u32() % max;
                len = min(len, max - start);

                ranges[i].start = start + i * max;
                ranges[i].len = len;
        }
}
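
/*
 * After ext4_free_blocks_simple(), only [start, start + len) in the goal
 * group may be free; every other group must still be fully used.
 */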
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
                            ext4_group_t goal_group, ext4_grpblk_t start,
                            ext4_grpblk_t len)
{
        void *bitmap;
        ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_group_t i;

        for (i = 0; i < ext4_get_groups_count(sb); i++) {
                if (i == goal_group)
                        continue;

                bitmap = mbt_ctx_bitmap(sb, i);
                bit = mb_find_next_zero_bit(bitmap, max, 0);
                KUNIT_ASSERT_EQ_MSG(test, bit, max,
                                    "free block on unexpected group %d", i);
        }

        bitmap = mbt_ctx_bitmap(sb, goal_group);
        bit = mb_find_next_zero_bit(bitmap, max, 0);
        KUNIT_ASSERT_EQ(test, bit, start);

        bit = mb_find_next_bit(bitmap, max, bit + 1);
        KUNIT_ASSERT_EQ(test, bit, start + len);
}

static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
                              ext4_grpblk_t start, ext4_grpblk_t len)
{
        struct super_block *sb = (struct super_block *)test->priv;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct inode *inode;
        ext4_fsblk_t block;

        inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
        if (!inode)
                return;
        inode->i_sb = sb;

        if (len == 0)
                return;

        block = ext4_group_first_block_no(sb, goal_group) +
                EXT4_C2B(sbi, start);
        ext4_free_blocks_simple(inode, block, len);
        validate_free_blocks_simple(test, sb, goal_group, start, len);
        mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}

static void test_free_blocks_simple(struct kunit *test)
{
        struct super_block *sb = (struct super_block *)test->priv;
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_group_t i;
        struct test_range ranges[TEST_RANGE_COUNT];

        for (i = 0; i < ext4_get_groups_count(sb); i++)
                mbt_ctx_mark_used(sb, i, 0, max);

        mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
        for (i = 0; i < TEST_RANGE_COUNT; i++)
                test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
                        ranges[i].start, ranges[i].len);
}
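
/*
 * Mark one extent used via ext4_mb_mark_diskspace_used() and check that
 * exactly [start, start + len) is set in the goal group's bitmap.
 */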
static void
test_mark_diskspace_used_range(struct kunit *test,
                               struct ext4_allocation_context *ac,
                               ext4_grpblk_t start,
                               ext4_grpblk_t len)
{
        struct super_block *sb = (struct super_block *)test->priv;
        int ret;
        void *bitmap;
        ext4_grpblk_t i, max;

        /* ext4_mb_mark_diskspace_used will BUG if len is 0 */
        if (len == 0)
                return;

        ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
        ac->ac_b_ex.fe_start = start;
        ac->ac_b_ex.fe_len = len;

        bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
        memset(bitmap, 0, sb->s_blocksize);
        ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
        KUNIT_ASSERT_EQ(test, ret, 0);

        max = EXT4_CLUSTERS_PER_GROUP(sb);
        i = mb_find_next_bit(bitmap, max, 0);
        KUNIT_ASSERT_EQ(test, i, start);
        i = mb_find_next_zero_bit(bitmap, max, i + 1);
        KUNIT_ASSERT_EQ(test, i, start + len);
        i = mb_find_next_bit(bitmap, max, i + 1);
        KUNIT_ASSERT_EQ(test, max, i);
}

static void test_mark_diskspace_used(struct kunit *test)
{
        struct super_block *sb = (struct super_block *)test->priv;
        struct inode *inode;
        struct ext4_allocation_context ac;
        struct test_range ranges[TEST_RANGE_COUNT];
        int i;

        mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

        inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
        if (!inode)
                return;
        inode->i_sb = sb;

        ac.ac_status = AC_STATUS_FOUND;
        ac.ac_sb = sb;
        ac.ac_inode = inode;
        for (i = 0; i < TEST_RANGE_COUNT; i++)
                test_mark_diskspace_used_range(test, &ac, ranges[i].start,
                                               ranges[i].len);
}
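
/*
 * Reference buddy generator: pair free blocks order by order while
 * accounting bb_free, bb_counters[], bb_fragments and the largest free
 * order, for comparison against ext4_mb_generate_buddy().
 */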
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
                               void *bitmap, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        uint32_t order, off;
        void *bb, *bb_h;
        int max;

        memset(buddy, 0xff, sb->s_blocksize);
        memset(grp, 0, offsetof(struct ext4_group_info,
                                bb_counters[MB_NUM_ORDERS(sb)]));

        bb = bitmap;
        max = EXT4_CLUSTERS_PER_GROUP(sb);
        bb_h = buddy + sbi->s_mb_offsets[1];

        off = mb_find_next_zero_bit(bb, max, 0);
        grp->bb_first_free = off;
        while (off < max) {
                grp->bb_counters[0]++;
                grp->bb_free++;

                if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
                        grp->bb_free++;
                        grp->bb_counters[0]--;
                        mb_clear_bit(off >> 1, bb_h);
                        grp->bb_counters[1]++;
                        grp->bb_largest_free_order = 1;
                        off++;
                }

                off = mb_find_next_zero_bit(bb, max, off + 1);
        }

        for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
                bb = buddy + sbi->s_mb_offsets[order];
                bb_h = buddy + sbi->s_mb_offsets[order + 1];
                max = max >> 1;
                off = mb_find_next_zero_bit(bb, max, 0);

                while (off < max) {
                        if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
                                mb_set_bits(bb, off, 2);
                                grp->bb_counters[order] -= 2;
                                mb_clear_bit(off >> 1, bb_h);
                                grp->bb_counters[order + 1]++;
                                grp->bb_largest_free_order = order + 1;
                                off++;
                        }

                        off = mb_find_next_zero_bit(bb, max, off + 1);
                }
        }

        max = EXT4_CLUSTERS_PER_GROUP(sb);
        off = mb_find_next_zero_bit(bitmap, max, 0);
        while (off < max) {
                grp->bb_fragments++;

                off = mb_find_next_bit(bitmap, max, off + 1);
                if (off + 1 >= max)
                        break;

                off = mb_find_next_zero_bit(bitmap, max, off + 1);
        }
}

static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
                        struct ext4_group_info *grp2)
{
        struct super_block *sb = (struct super_block *)test->priv;
        int i;

        KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
                        grp2->bb_first_free);
        KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
                        grp2->bb_fragments);
        KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
        KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
                        grp2->bb_largest_free_order);

        for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
                KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
                                    grp2->bb_counters[i],
                                    "bb_counters[%d] diffs, expected %d, generated %d",
                                    i, grp1->bb_counters[i],
                                    grp2->bb_counters[i]);
        }
}

static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
                       void *mbt_buddy, struct ext4_group_info *mbt_grp,
                       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
        int i;

        mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

        for (i = 0; i < MB_NUM_ORDERS(sb); i++)
                ext4_grp->bb_counters[i] = 0;
        /* needed by validation in ext4_mb_generate_buddy */
        ext4_grp->bb_free = mbt_grp->bb_free;
        memset(ext4_buddy, 0xff, sb->s_blocksize);
        ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
                               ext4_grp);

        KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
                        0);
        mbt_validate_group_info(test, mbt_grp, ext4_grp);
}

static void test_mb_generate_buddy(struct kunit *test)
{
        struct super_block *sb = (struct super_block *)test->priv;
        void *bitmap, *expected_bb, *generate_bb;
        struct ext4_group_info *expected_grp, *generate_grp;
        struct test_range ranges[TEST_RANGE_COUNT];
        int i;

        bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
        expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
        generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
        expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
                                bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
        generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
        KUNIT_ASSERT_NOT_NULL(test, generate_grp);

        mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
        for (i = 0; i < TEST_RANGE_COUNT; i++) {
                mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
                do_test_generate_buddy(test, sb, bitmap, expected_bb,
                                       expected_grp, generate_bb, generate_grp);
        }
}

static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
                        ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
                        void *buddy, struct ext4_group_info *grp)
{
        struct super_block *sb = (struct super_block *)test->priv;
        struct ext4_free_extent ex;
        int i;

        /* mb_mark_used only accepts non-zero len */
        if (len == 0)
                return;

        ex.fe_start = start;
        ex.fe_len = len;
        ex.fe_group = TEST_GOAL_GROUP;

        ext4_lock_group(sb, TEST_GOAL_GROUP);
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, TEST_GOAL_GROUP);

        mb_set_bits(bitmap, start, len);
        /* bypass bb_free validation in ext4_mb_generate_buddy */
        grp->bb_free -= len;
        memset(buddy, 0xff, sb->s_blocksize);
        for (i = 0; i < MB_NUM_ORDERS(sb); i++)
                grp->bb_counters[i] = 0;
        ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

        KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
                        0);
        mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_mark_used(struct kunit *test)
{
        struct ext4_buddy e4b;
        struct super_block *sb = (struct super_block *)test->priv;
        void *bitmap, *buddy;
        struct ext4_group_info *grp;
        int ret;
        struct test_range ranges[TEST_RANGE_COUNT];
        int i;

        /* buddy cache assumes that each page contains at least one block */
        if (sb->s_blocksize > PAGE_SIZE)
                kunit_skip(test, "blocksize exceeds pagesize");

        bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
        buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
        grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
                                bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);

        ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
        KUNIT_ASSERT_EQ(test, ret, 0);

        grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
        mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
        for (i = 0; i < TEST_RANGE_COUNT; i++)
                test_mb_mark_used_range(test, &e4b, ranges[i].start,
                                        ranges[i].len, bitmap, buddy, grp);

        ext4_mb_unload_buddy(&e4b);
}
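
/*
 * Free one extent with mb_free_blocks() and cross-check the buddy bitmap
 * and group info against ext4_mb_generate_buddy() run on a shadow bitmap.
 */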
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
                          ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
                          void *buddy, struct ext4_group_info *grp)
{
        struct super_block *sb = (struct super_block *)test->priv;
        int i;

        /* mb_free_blocks will WARN if len is 0 */
        if (len == 0)
                return;

        ext4_lock_group(sb, e4b->bd_group);
        mb_free_blocks(NULL, e4b, start, len);
        ext4_unlock_group(sb, e4b->bd_group);

        mb_clear_bits(bitmap, start, len);
        /* bypass bb_free validation in ext4_mb_generate_buddy */
        grp->bb_free += len;
        memset(buddy, 0xff, sb->s_blocksize);
        for (i = 0; i < MB_NUM_ORDERS(sb); i++)
                grp->bb_counters[i] = 0;
        ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

        KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
                        0);
        mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_free_blocks(struct kunit *test)
{
        struct ext4_buddy e4b;
        struct super_block *sb = (struct super_block *)test->priv;
        void *bitmap, *buddy;
        struct ext4_group_info *grp;
        struct ext4_free_extent ex;
        int ret;
        int i;
        struct test_range ranges[TEST_RANGE_COUNT];

        /* buddy cache assumes that each page contains at least one block */
        if (sb->s_blocksize > PAGE_SIZE)
                kunit_skip(test, "blocksize exceeds pagesize");

        bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
        buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
        grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
                                bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);

        ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ex.fe_start = 0;
        ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
        ex.fe_group = TEST_GOAL_GROUP;

        ext4_lock_group(sb, TEST_GOAL_GROUP);
        mb_mark_used(&e4b, &ex);
        ext4_unlock_group(sb, TEST_GOAL_GROUP);

        grp->bb_free = 0;
        memset(bitmap, 0xff, sb->s_blocksize);

        mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
        for (i = 0; i < TEST_RANGE_COUNT; i++)
                test_mb_free_blocks_range(test, &e4b, ranges[i].start,
                                          ranges[i].len, bitmap, buddy, grp);

        ext4_mb_unload_buddy(&e4b);
}
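
/* Every case is repeated for the 1k, 4k and 64k block size layouts below. */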
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
        {
                .blocksize_bits = 10,
                .blocks_per_group = 8192,
        },
        {
                .blocksize_bits = 12,
                .blocks_per_group = 8192,
        },
        {
                .blocksize_bits = 16,
                .blocks_per_group = 8192,
        },
};

static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
                            char *desc)
{
        snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
                 "blocks_per_group=%d group_count=%d desc_size=%d\n",
                 layout->blocksize_bits, layout->cluster_bits,
                 layout->blocks_per_group, layout->group_count,
                 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

static struct kunit_case mbt_test_cases[] = {
        KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
        KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
        KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
        KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
        KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
        KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
        {}
};

static struct kunit_suite mbt_test_suite = {
        .name = "ext4_mballoc_test",
        .init = mbt_kunit_init,
        .exit = mbt_kunit_exit,
        .test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");