/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number for a given block number.
 */
ext4_group_t ext4_get_group_number(struct super_block *sb, ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number.
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
				  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
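	/*
	 * do_div() divides blocknr by the blocks-per-group count in place
	 * (blocknr becomes the group number) and returns the remainder,
	 * which is shifted down by s_cluster_bits to get a cluster offset
	 * within the group.
	 */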
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'.  Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;
	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
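	/* Round the block count up to a whole number of clusters. */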
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				  struct buffer_head *bh,
				  ext4_group_t block_group,
				  struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSCORRUPTED;
	}

	memset(bh->b_data, 0, sb->s_blocksize);

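	/*
	 * Mark the clusters occupied by the superblock, the group
	 * descriptors and the reserved GDT blocks at the start of this
	 * group as in use.
	 */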
	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	if ((bit_max >> 3) >= bh->b_size)
		return -EFSCORRUPTED;

	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * the blocksize * 8 (which is the size of the bitmap), set the
	 * rest of the block bitmap to 1.
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);

	return 0;
}

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh_p;

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}
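
	/*
	 * Descriptors are packed EXT4_DESC_PER_BLOCK() to a block:
	 * group_desc selects the descriptor block, offset the slot inside it.
	 */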
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
	/*
	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
	 * the pointer being dereferenced won't be dereferenced again. By
	 * looking at the usage in add_new_gdb() the value isn't modified,
	 * just the pointer, and so it remains valid.
	 */
	if (!bh_p) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)bh_p->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = bh_p;
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (ext4_has_feature_flex_bg(sb)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}

	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad inode bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
		return blk;
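
	/*
	 * Every inode table block must be marked in use; scanning for a
	 * zero bit inside the itable range catches any block that is not.
	 */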
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static int ext4_validate_block_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;
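
	/*
	 * Re-check buffer_verified() under the group lock so that only one
	 * task performs the verification; later callers see the flag and
	 * return early.
	 */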
	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
						    desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSCORRUPTED;
	}
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSCORRUPTED;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate the
 * bits for block/inode/inode tables are set in the bitmaps.
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);
	bitmap_blk = ext4_block_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid block bitmap block %llu in "
			   "block_group %u", bitmap_blk, block_group);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}
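
	/*
	 * bitmap_uptodate() is tracked separately from buffer_uptodate():
	 * for a BLOCK_UNINIT group the buffer may match the disk and still
	 * not hold a valid bitmap until it is initialized below.
	 */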
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Block bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err) {
			ext4_error(sb, "Failed to init block bitmap for group "
				   "%u: %d", block_group, err);
			goto out;
		}
		goto verify;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group was not marked uninit, an uptodate buffer
		 * means the on-disk bitmap is valid, so the bitmap flag can
		 * be set as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return -EFSCORRUPTED;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return -EIO;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}
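
/*
 * Convenience wrapper: submit the block bitmap read and wait for it.  The
 * separate _nowait/_wait entry points above let callers start reads for
 * several block bitmaps before waiting on any of them.
 */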
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;
	int err;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (IS_ERR(bh))
		return bh;
	err = ext4_wait_block_bitmap(sb, block_group, bh);
	if (err) {
		put_bh(bh);
		return ERR_PTR(err);
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;
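
	/*
	 * The percpu counters are only approximate; once the estimate gets
	 * within EXT4_FREECLUSTERS_WATERMARK of running out, fall back to
	 * the exact (slower) sums before deciding.
	 */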
	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks.  Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}
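
/*
 * Reserve nclusters for an upcoming allocation: on success the clusters are
 * added to s_dirtyclusters_counter so that later callers see them as already
 * spoken for.
 */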
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	smp_mb();
	if (EXT4_SB(sb)->s_mb_free_pending)
		jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
	return 1;
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:		error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;

	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (ext4_has_feature_sparse_super2(sb)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (ext4_has_feature_meta_bg(sb))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!ext4_has_feature_meta_bg(sb) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}

	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;
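
	/*
	 * Otherwise add a PID-derived "colour" offset so that concurrent
	 * allocators start in different parts of the group instead of all
	 * contending for the same blocks.
	 */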
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}