// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cred.h>

#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count for the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

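/*
 * I/O completion handler for the bitmap read submitted from
 * ext4_read_inode_bitmap(): on success mark both the buffer and the bitmap
 * uptodate, then unlock the buffer and drop the extra reference taken
 * before submission.
 */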
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

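/*
 * Verify the checksum of an already-read inode bitmap.  On a checksum
 * failure the group is marked as having a corrupt inode bitmap (so later
 * allocations skip it) and its free inode count is subtracted from the
 * global free inodes counter.
 */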
static int ext4_validate_inode_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	blk = ext4_inode_bitmap(sb, desc);
	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, blk);
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;

			count = ext4_free_inodes_count(sb, desc);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSBADCRC;
	}
verified:
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
	return 0;
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return the buffer_head of the bitmap on success, or an ERR_PTR on failure.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid inode bitmap blk %llu in "
			   "block_group %u", bitmap_blk, block_group);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return ERR_PTR(-EIO);
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Inode bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return ERR_PTR(-EIO);
	}

verify:
	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

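/*
 * Callers of ext4_read_inode_bitmap() get back a buffer_head that is both
 * uptodate and checksum-verified; they are responsible for releasing it
 * with brelse() once they are done with the bitmap.
 */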
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	grp = ext4_get_group_info(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		fatal = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
		fatal = -EFSCORRUPTED;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		struct flex_groups *fg;

		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
					 ext4_flex_group(sbi, block_group));
		atomic_inc(&fg->free_inodes);
		if (is_directory)
			atomic_dec(&fg->used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;

	if (flex_size > 1) {
		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
							     s_flex_groups, g);
		stats->free_inodes = atomic_read(&fg->free_inodes);
		stats->free_clusters = atomic64_read(&fg->free_clusters);
		stats->used_dirs = atomic_read(&fg->used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inodes and free clusters counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
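/*
 * For example (assuming 8192 inodes per group and 16 groups per flex_bg):
 * a flex group is rejected as a home for a new directory once it already
 * holds ndirs/ngroups + 8192 directories, or once its free inode count
 * drops more than 32768 below the per-flex-group average (see the max_dirs,
 * min_inodes and min_clusters thresholds computed below).
 */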
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freec, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	avefreec = freec;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == d_inode(sb->s_root)) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			grp = prandom_u32();
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	/*
	 * The free-inodes counter is approximate, and for really small
	 * filesystems the above test can fail to find any blockgroups
	 */
	if (avefreei) {
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

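/*
 * find_group_other() picks a group for a non-directory inode: first the
 * parent's flex group (or block group), then a quadratic-hash probe over
 * the remaining groups, and finally a linear scan for any group with a
 * free inode.
 */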
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	60
#define RECENTCY_DIRTY	300
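/*
 * Example: an inode whose i_dtime falls within the last RECENTCY_MIN (60)
 * seconds is treated as recently deleted; if its inode table block is still
 * dirty in the buffer cache, the window grows to RECENTCY_MIN +
 * RECENTCY_DIRTY (360) seconds.
 */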
static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode	*raw_inode;
	struct buffer_head	*bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
	u32 dtime, now;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && time_before32(dtime, now) &&
	    time_before32(now, dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}

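/*
 * Scan the in-memory inode bitmap for the next free slot at or after *ino.
 * Returns 1 (and updates *ino) when a usable bit is found, 0 when the group
 * is exhausted.  In no-journal mode, recently deleted inodes are skipped so
 * their inode table blocks have a chance to reach the disk first.
 */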
static int find_inode_bit(struct super_block *sb, ext4_group_t group,
			  struct buffer_head *bitmap, unsigned long *ino)
{
next:
	*ino = ext4_find_next_zero_bit((unsigned long *)
				       bitmap->b_data,
				       EXT4_INODES_PER_GROUP(sb), *ino);
	if (*ino >= EXT4_INODES_PER_GROUP(sb))
		goto not_found;

	if ((EXT4_SB(sb)->s_journal == NULL) &&
	    recently_deleted(sb, group, *ino)) {
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		goto not_found;
	}

	return 1;
not_found:
	return 0;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp;
	int encrypt = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return ERR_PTR(-EIO);

	if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
	    !(i_flags & EXT4_EA_INODE_FL)) {
		err = fscrypt_get_encryption_info(dir);
		if (err)
			return ERR_PTR(err);
		if (!fscrypt_has_encryption_key(dir))
			return ERR_PTR(-ENOKEY);
		encrypt = 1;
	}

	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
#ifdef CONFIG_EXT4_FS_POSIX_ACL
		struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

		if (IS_ERR(p))
			return ERR_CAST(p);
		if (p) {
			int acl_size = p->a_count * sizeof(ext4_acl_entry);

			nblocks += (S_ISDIR(mode) ? 2 : 1) *
				__ext4_xattr_set_credits(sb, NULL /* inode */,
					NULL /* block_bh */, acl_size,
					true /* is_create */);
			posix_acl_release(p);
		}
#endif

#ifdef CONFIG_SECURITY
		{
			int num_security_xattrs = 1;

#ifdef CONFIG_INTEGRITY
			num_security_xattrs++;
#endif
			/*
			 * We assume that security xattrs are never
			 * more than 1k.  In practice they are under
			 * 128 bytes.
			 */
			nblocks += num_security_xattrs *
				__ext4_xattr_set_credits(sb, NULL /* inode */,
					NULL /* block_bh */, 1024,
					true /* is_create */);
		}
#endif
		if (encrypt)
			nblocks += __ext4_xattr_set_credits(sb,
					NULL /* inode */, NULL /* block_bh */,
					FSCRYPT_SET_CONTEXT_MAX_SIZE,
					true /* is_create */);
	}

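	/*
	 * At this point nblocks is the estimated number of journal credits
	 * needed to create the inode and its initial xattrs (ACLs, security
	 * labels and, if enabled, the encryption context).  It is only used
	 * when the caller did not pass in a transaction handle.
	 */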
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	if (ext4_has_feature_project(sb) &&
	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
		ei->i_projid = EXT4_I(dir)->i_projid;
	else
		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);

	err = dquot_initialize(inode);
	if (err)
		goto out;

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;

		grp = ext4_get_group_info(sb, group);
		/* Skip groups with already-known suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
			goto next_group;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) ||
		    IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}

repeat_in_this_group:
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;

		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			goto next_group;
		}

		if (!handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
							 handle_type, nblocks,
							 0);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		if (ret2) {
			/* Someone already took the bit. Repeat the search
			 * with lock held.
			 */
			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
			if (ret2) {
				ext4_set_bit(ino, inode_bitmap_bh->b_data);
				ret2 = 0;
			} else {
				ret2 = 1; /* we didn't grab the inode */
			}
		}
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */

		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
							f)->used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
						flex_group)->free_inodes);
	}

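	/*
	 * The block group's allocation state is now fully updated; the rest
	 * of the function initializes the new in-core inode, its quota,
	 * ACLs, security labels and (optionally) encryption context.
	 */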
	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_flags |= i_flags;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		goto out;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (ext4_has_feature_inline_data(sb))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	/*
	 * Since the encryption xattr will always be unique, create it first so
	 * that it's less likely to end up in an external xattr block and
	 * prevent its deduplication.
	 */
	if (encrypt) {
		err = fscrypt_inherit_context(dir, inode, handle, true);
		if (err)
			goto fail_free_drop;
	}

	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
		err = ext4_init_acl(handle, inode, dir);
		if (err)
			goto fail_free_drop;

		err = ext4_init_security(handle, inode, dir, qstr);
		if (err)
			goto fail_free_drop;
	}

	if (ext4_has_feature_extents(sb)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto bad_orphan;

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		ext4_error(sb, "inode bitmap error %ld for orphan %lu",
			   ino, PTR_ERR(bitmap_bh));
		return (struct inode *) bitmap_bh;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
			   ino, err);
		return inode;
	}

	/*
	 * If the orphan has i_nlink > 0 then it should be able to
	 * be truncated, otherwise it won't be removed from the orphan
	 * list during processing and an infinite loop will result.
	 * Similarly, it must not be a bad inode.
	 */
	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
	    is_bad_inode(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

bad_orphan:
	ext4_error(sb, "bad orphan inode %lu", ino);
	if (bitmap_bh)
		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
	if (inode) {
		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_ERR "max_ino=%lu\n", max_ino);
		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

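/*
 * ext4_count_free_inodes() walks every group descriptor and sums the free
 * inode counts; with EXT4FS_DEBUG it also re-counts the on-disk bitmaps so
 * both totals can be compared against the superblock value.
 */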
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes the not-yet-zeroed inode table by writing zeroes through the whole
 * inode table.  Must be called without any spinlock held.  On an active
 * filesystem the only caller is the ext4lazyinit thread, so we do not need
 * any special locks; however, we have to prevent inode allocation from the
 * current group, so we take alloc_sem to block ext4_new_inode() until we
 * are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;
	unsigned long used_inos = 0;

	/* This should not happen, but just to be sure check this */
	if (sb_rdonly(sb)) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		used_inos = EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp);
		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);

		/* Bogus inode unused count? */
		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
			ext4_error(sb, "Something is wrong with group %u: "
				   "used itable blocks: %d; "
				   "itable unused count: %u",
				   group, used_blks,
				   ext4_itable_unused_count(sb, gdp));
			ret = 1;
			goto err_out;
		}

		used_inos += group * EXT4_INODES_PER_GROUP(sb);
		/*
		 * Are there some uninitialized inodes in the inode table
		 * before the first normal inode?
		 */
		if ((used_blks != sbi->s_itb_per_group) &&
		    (used_inos < EXT4_FIRST_INO(sb))) {
			ext4_error(sb, "Something is wrong with group %u: "
				   "itable unused count: %u; "
				   "itables initialized count: %ld",
				   group, ext4_itable_unused_count(sb, gdp),
				   used_inos);
			ret = 1;
			goto err_out;
		}
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * zeroout.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}