GNU Linux-libre 5.10.153-gnu1
[releases.git] / fs / ext4 / super.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/super.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  from
11  *
12  *  linux/fs/minix/inode.c
13  *
14  *  Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  *  Big-endian to little-endian byte-swapping/bitmaps by
17  *        David S. Miller (davem@caip.rutgers.edu), 1995
18  */
19
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/fs.h>
23 #include <linux/time.h>
24 #include <linux/vmalloc.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/parser.h>
30 #include <linux/buffer_head.h>
31 #include <linux/exportfs.h>
32 #include <linux/vfs.h>
33 #include <linux/random.h>
34 #include <linux/mount.h>
35 #include <linux/namei.h>
36 #include <linux/quotaops.h>
37 #include <linux/seq_file.h>
38 #include <linux/ctype.h>
39 #include <linux/log2.h>
40 #include <linux/crc16.h>
41 #include <linux/dax.h>
42 #include <linux/cleancache.h>
43 #include <linux/uaccess.h>
44 #include <linux/iversion.h>
45 #include <linux/unicode.h>
46 #include <linux/part_stat.h>
47 #include <linux/kthread.h>
48 #include <linux/freezer.h>
49
50 #include "ext4.h"
51 #include "ext4_extents.h"       /* Needed for trace points definition */
52 #include "ext4_jbd2.h"
53 #include "xattr.h"
54 #include "acl.h"
55 #include "mballoc.h"
56 #include "fsmap.h"
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/ext4.h>
60
61 static struct ext4_lazy_init *ext4_li_info;
62 static struct mutex ext4_li_mtx;
63 static struct ratelimit_state ext4_mount_msg_ratelimit;
64
65 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
66                              unsigned long journal_devnum);
67 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
68 static int ext4_commit_super(struct super_block *sb, int sync);
69 static int ext4_mark_recovery_complete(struct super_block *sb,
70                                         struct ext4_super_block *es);
71 static int ext4_clear_journal_err(struct super_block *sb,
72                                   struct ext4_super_block *es);
73 static int ext4_sync_fs(struct super_block *sb, int wait);
74 static int ext4_remount(struct super_block *sb, int *flags, char *data);
75 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
76 static int ext4_unfreeze(struct super_block *sb);
77 static int ext4_freeze(struct super_block *sb);
78 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
79                        const char *dev_name, void *data);
80 static inline int ext2_feature_set_ok(struct super_block *sb);
81 static inline int ext3_feature_set_ok(struct super_block *sb);
82 static int ext4_feature_set_ok(struct super_block *sb, int readonly);
83 static void ext4_destroy_lazyinit_thread(void);
84 static void ext4_unregister_li_request(struct super_block *sb);
85 static void ext4_clear_request_list(void);
86 static struct inode *ext4_get_journal_inode(struct super_block *sb,
87                                             unsigned int journal_inum);
88
89 /*
90  * Lock ordering
91  *
92  * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
93  * i_mmap_rwsem (inode->i_mmap_rwsem)!
94  *
95  * page fault path:
96  * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
97  *   page lock -> i_data_sem (rw)
98  *
99  * buffered write path:
100  * sb_start_write -> i_mutex -> mmap_lock
101  * sb_start_write -> i_mutex -> transaction start -> page lock ->
102  *   i_data_sem (rw)
103  *
104  * truncate:
105  * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
106  * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
107  *   i_data_sem (rw)
108  *
109  * direct IO:
110  * sb_start_write -> i_mutex -> mmap_lock
111  * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
112  *
113  * writepages:
114  * transaction start -> page lock(s) -> i_data_sem (rw)
115  */
116
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
/*
 * When the dedicated ext2 driver is not built, register ext4 as the
 * handler for the "ext2" filesystem type so ext2 images remain mountable.
 */
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
/* True if this block device was claimed via the "ext2" compatibility type. */
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
131
132
/*
 * ext4 always registers the "ext3" filesystem type; ext3 is implemented
 * by the ext4 driver (there is no separate ext3 module anymore).
 */
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
/* True if this block device was claimed via the "ext3" compatibility type. */
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
143
144
145 static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
146                                   bh_end_io_t *end_io)
147 {
148         /*
149          * buffer's verified bit is no longer valid after reading from
150          * disk again due to write out error, clear it to make sure we
151          * recheck the buffer contents.
152          */
153         clear_buffer_verified(bh);
154
155         bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
156         get_bh(bh);
157         submit_bh(REQ_OP_READ, op_flags, bh);
158 }
159
160 void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
161                          bh_end_io_t *end_io)
162 {
163         BUG_ON(!buffer_locked(bh));
164
165         if (ext4_buffer_uptodate(bh)) {
166                 unlock_buffer(bh);
167                 return;
168         }
169         __ext4_read_bh(bh, op_flags, end_io);
170 }
171
172 int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
173 {
174         BUG_ON(!buffer_locked(bh));
175
176         if (ext4_buffer_uptodate(bh)) {
177                 unlock_buffer(bh);
178                 return 0;
179         }
180
181         __ext4_read_bh(bh, op_flags, end_io);
182
183         wait_on_buffer(bh);
184         if (buffer_uptodate(bh))
185                 return 0;
186         return -EIO;
187 }
188
189 int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
190 {
191         lock_buffer(bh);
192         if (!wait) {
193                 ext4_read_bh_nowait(bh, op_flags, NULL);
194                 return 0;
195         }
196         return ext4_read_bh(bh, op_flags, NULL);
197 }
198
199 /*
200  * This works like __bread_gfp() except it uses ERR_PTR for error
201  * returns.  Currently with sb_bread it's impossible to distinguish
202  * between ENOMEM and EIO situations (since both result in a NULL
203  * return.
204  */
205 static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
206                                                sector_t block, int op_flags,
207                                                gfp_t gfp)
208 {
209         struct buffer_head *bh;
210         int ret;
211
212         bh = sb_getblk_gfp(sb, block, gfp);
213         if (bh == NULL)
214                 return ERR_PTR(-ENOMEM);
215         if (ext4_buffer_uptodate(bh))
216                 return bh;
217
218         ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
219         if (ret) {
220                 put_bh(bh);
221                 return ERR_PTR(ret);
222         }
223         return bh;
224 }
225
226 struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
227                                    int op_flags)
228 {
229         return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
230 }
231
232 struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
233                                             sector_t block)
234 {
235         return __ext4_sb_bread_gfp(sb, block, 0, 0);
236 }
237
238 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
239 {
240         struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);
241
242         if (likely(bh)) {
243                 if (trylock_buffer(bh))
244                         ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL);
245                 brelse(bh);
246         }
247 }
248
249 static int ext4_verify_csum_type(struct super_block *sb,
250                                  struct ext4_super_block *es)
251 {
252         if (!ext4_has_feature_metadata_csum(sb))
253                 return 1;
254
255         return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
256 }
257
258 static __le32 ext4_superblock_csum(struct super_block *sb,
259                                    struct ext4_super_block *es)
260 {
261         struct ext4_sb_info *sbi = EXT4_SB(sb);
262         int offset = offsetof(struct ext4_super_block, s_checksum);
263         __u32 csum;
264
265         csum = ext4_chksum(sbi, ~0, (char *)es, offset);
266
267         return cpu_to_le32(csum);
268 }
269
270 static int ext4_superblock_csum_verify(struct super_block *sb,
271                                        struct ext4_super_block *es)
272 {
273         if (!ext4_has_metadata_csum(sb))
274                 return 1;
275
276         return es->s_checksum == ext4_superblock_csum(sb, es);
277 }
278
279 void ext4_superblock_csum_set(struct super_block *sb)
280 {
281         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
282
283         if (!ext4_has_metadata_csum(sb))
284                 return;
285
286         es->s_checksum = ext4_superblock_csum(sb, es);
287 }
288
289 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
290                                struct ext4_group_desc *bg)
291 {
292         return le32_to_cpu(bg->bg_block_bitmap_lo) |
293                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
294                  (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
295 }
296
297 ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
298                                struct ext4_group_desc *bg)
299 {
300         return le32_to_cpu(bg->bg_inode_bitmap_lo) |
301                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
302                  (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
303 }
304
305 ext4_fsblk_t ext4_inode_table(struct super_block *sb,
306                               struct ext4_group_desc *bg)
307 {
308         return le32_to_cpu(bg->bg_inode_table_lo) |
309                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
310                  (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
311 }
312
313 __u32 ext4_free_group_clusters(struct super_block *sb,
314                                struct ext4_group_desc *bg)
315 {
316         return le16_to_cpu(bg->bg_free_blocks_count_lo) |
317                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
318                  (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
319 }
320
321 __u32 ext4_free_inodes_count(struct super_block *sb,
322                               struct ext4_group_desc *bg)
323 {
324         return le16_to_cpu(bg->bg_free_inodes_count_lo) |
325                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
326                  (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
327 }
328
329 __u32 ext4_used_dirs_count(struct super_block *sb,
330                               struct ext4_group_desc *bg)
331 {
332         return le16_to_cpu(bg->bg_used_dirs_count_lo) |
333                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
334                  (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
335 }
336
337 __u32 ext4_itable_unused_count(struct super_block *sb,
338                               struct ext4_group_desc *bg)
339 {
340         return le16_to_cpu(bg->bg_itable_unused_lo) |
341                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
342                  (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
343 }
344
345 void ext4_block_bitmap_set(struct super_block *sb,
346                            struct ext4_group_desc *bg, ext4_fsblk_t blk)
347 {
348         bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
349         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
350                 bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
351 }
352
353 void ext4_inode_bitmap_set(struct super_block *sb,
354                            struct ext4_group_desc *bg, ext4_fsblk_t blk)
355 {
356         bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
357         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
358                 bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
359 }
360
361 void ext4_inode_table_set(struct super_block *sb,
362                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
363 {
364         bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
365         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
366                 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
367 }
368
369 void ext4_free_group_clusters_set(struct super_block *sb,
370                                   struct ext4_group_desc *bg, __u32 count)
371 {
372         bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
373         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
374                 bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
375 }
376
377 void ext4_free_inodes_set(struct super_block *sb,
378                           struct ext4_group_desc *bg, __u32 count)
379 {
380         bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
381         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
382                 bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
383 }
384
385 void ext4_used_dirs_set(struct super_block *sb,
386                           struct ext4_group_desc *bg, __u32 count)
387 {
388         bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
389         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
390                 bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
391 }
392
393 void ext4_itable_unused_set(struct super_block *sb,
394                           struct ext4_group_desc *bg, __u32 count)
395 {
396         bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
397         if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
398                 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
399 }
400
401 static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
402 {
403         time64_t now = ktime_get_real_seconds();
404
405         now = clamp_val(now, 0, (1ull << 40) - 1);
406
407         *lo = cpu_to_le32(lower_32_bits(now));
408         *hi = upper_32_bits(now);
409 }
410
/* Reassemble a split on-disk timestamp from its low/high parts. */
static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
/* Map a superblock timestamp field name onto its _lo/_hi field pair. */
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
419
/*
 * Record an error in the in-memory and on-disk superblock without
 * writing the superblock out: flag the filesystem as containing errors,
 * store the location/time of the most recent error, and latch the first
 * error since the last fsck into the s_first_error_* fields.  See
 * save_error_info() for the variant that also commits the superblock.
 */
static void __save_error_info(struct super_block *sb, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	int err;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	/* Nothing can be persisted on a read-only device. */
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	ext4_update_tstamp(es, s_last_error_time);
	/*
	 * NOTE(review): strncpy() does not guarantee NUL termination;
	 * the on-disk function-name fields appear to be treated as
	 * fixed-size — confirm readers never assume termination.
	 */
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	/* Map the errno onto the compact on-disk EXT4_ERR_* code. */
	switch (error) {
	case EIO:
		err = EXT4_ERR_EIO;
		break;
	case ENOMEM:
		err = EXT4_ERR_ENOMEM;
		break;
	case EFSBADCRC:
		err = EXT4_ERR_EFSBADCRC;
		break;
	case 0:
	case EFSCORRUPTED:
		err = EXT4_ERR_EFSCORRUPTED;
		break;
	case ENOSPC:
		err = EXT4_ERR_ENOSPC;
		break;
	case ENOKEY:
		err = EXT4_ERR_ENOKEY;
		break;
	case EROFS:
		err = EXT4_ERR_EROFS;
		break;
	case EFBIG:
		err = EXT4_ERR_EFBIG;
		break;
	case EEXIST:
		err = EXT4_ERR_EEXIST;
		break;
	case ERANGE:
		err = EXT4_ERR_ERANGE;
		break;
	case EOVERFLOW:
		err = EXT4_ERR_EOVERFLOW;
		break;
	case EBUSY:
		err = EXT4_ERR_EBUSY;
		break;
	case ENOTDIR:
		err = EXT4_ERR_ENOTDIR;
		break;
	case ENOTEMPTY:
		err = EXT4_ERR_ENOTEMPTY;
		break;
	case ESHUTDOWN:
		err = EXT4_ERR_ESHUTDOWN;
		break;
	case EFAULT:
		err = EXT4_ERR_EFAULT;
		break;
	default:
		err = EXT4_ERR_UNKNOWN;
	}
	es->s_last_error_errcode = err;
	/* First error since the last fsck: remember it separately. */
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		es->s_first_error_time_hi = es->s_last_error_time_hi;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
		es->s_first_error_errcode = es->s_last_error_errcode;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}
508
509 static void save_error_info(struct super_block *sb, int error,
510                             __u32 ino, __u64 block,
511                             const char *func, unsigned int line)
512 {
513         __save_error_info(sb, error, ino, block, func, line);
514         if (!bdev_read_only(sb->s_bdev))
515                 ext4_commit_super(sb, 1);
516 }
517
518 /*
519  * The del_gendisk() function uninitializes the disk-specific data
520  * structures, including the bdi structure, without telling anyone
521  * else.  Once this happens, any attempt to call mark_buffer_dirty()
522  * (for example, by ext4_commit_super), will cause a kernel OOPS.
523  * This is a kludge to prevent these oops until we can put in a proper
524  * hook in del_gendisk() to inform the VFS and file system layers.
525  */
526 static int block_device_ejected(struct super_block *sb)
527 {
528         struct inode *bd_inode = sb->s_bdev->bd_inode;
529         struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
530
531         return bdi->dev == NULL;
532 }
533
/*
 * jbd2 commit callback: after transaction @txn commits, process any
 * data freed in it and run the per-transaction callbacks queued on
 * txn->t_private_list.
 */
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		/* Run each callback without s_md_lock held. */
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
556
/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	/* @data is the committing transaction, passed by the caller. */
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	/* We never actually start I/O here; only the dirty bit is managed. */
	return AOP_WRITEPAGE_ACTIVATE;
}
602
603 static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
604 {
605         struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
606         struct writeback_control wbc = {
607                 .sync_mode =  WB_SYNC_ALL,
608                 .nr_to_write = LONG_MAX,
609                 .range_start = jinode->i_dirty_start,
610                 .range_end = jinode->i_dirty_end,
611         };
612
613         return write_cache_pages(mapping, &wbc,
614                                  ext4_journalled_writepage_callback,
615                                  jinode->i_transaction);
616 }
617
618 static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
619 {
620         int ret;
621
622         if (ext4_should_journal_data(jinode->i_vfs_inode))
623                 ret = ext4_journalled_submit_inode_data_buffers(jinode);
624         else
625                 ret = jbd2_journal_submit_inode_data_buffers(jinode);
626
627         return ret;
628 }
629
630 static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
631 {
632         int ret = 0;
633
634         if (!ext4_should_journal_data(jinode->i_vfs_inode))
635                 ret = jbd2_journal_finish_inode_data_buffers(jinode);
636
637         return ret;
638 }
639
640 static bool system_going_down(void)
641 {
642         return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
643                 || system_state == SYSTEM_RESTART;
644 }
645
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	/* Read-only fs, or errors=continue: the error is only recorded. */
	if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
		return;

	ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
	if (journal)
		jbd2_journal_abort(journal, -EIO);
	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_RO) || system_going_down()) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	} else if (test_opt(sb, ERRORS_PANIC)) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}
692
/* Rate-limit error reporting so a corrupt fs cannot flood the log. */
#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")
696
/*
 * Report a filesystem-level error: emit a rate-limited log message,
 * record the error in the superblock (save_error_info) and apply the
 * mount's error policy via ext4_handle_error().  No-op after a forced
 * shutdown.
 */
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, error, 0, block, function, line);
	ext4_handle_error(sb);
}
720
/*
 * Like __ext4_error() but ties the report to a specific inode and,
 * when @block is non-zero, to a block within it.
 */
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, error, inode->i_ino, block,
			function, line);
	ext4_handle_error(inode->i_sb);
}
752
/*
 * Like __ext4_error_inode() but also logs the file's path.  The error
 * is always recorded as EFSCORRUPTED.
 */
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		/* Best effort: fall back to a placeholder if the path won't fit. */
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
			function, line);
	ext4_handle_error(inode->i_sb);
}
791
/*
 * Translate an errno (as returned by journaling functions) into a
 * human-readable string.  Unknown codes are formatted into the
 * caller-supplied 16-byte @nbuf; returns NULL if no text is available.
 */
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			/*
			 * NOTE(review): the >= 0 test does not actually detect
			 * truncation (snprintf returns the would-be length),
			 * so a truncated string may still be returned.
			 */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}
831
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.
 *
 * @sb:       affected superblock
 * @function: caller's function name, for the log message
 * @line:     caller's line number, for the log message
 * @errno:    negative error code returned by the failing operation
 */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	/* Nothing useful to report once the fs has been shut down. */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	/* Record the error in the on-disk superblock even when the
	 * message itself was ratelimited away. */
	save_error_info(sb, -errno, 0, 0, function, line);
	ext4_handle_error(sb);
}
859
/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, int error, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	/* Already shut down: nothing further to record or report. */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Persist the error before printing, so it survives even if the
	 * panic/remount below interrupts us. */
	save_error_info(sb, error, 0, 0, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);

		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	/* Honour errors=panic, but not while the system is going down. */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down())
		panic("EXT4-fs panic from previous error\n");
}
903
904 void __ext4_msg(struct super_block *sb,
905                 const char *prefix, const char *fmt, ...)
906 {
907         struct va_format vaf;
908         va_list args;
909
910         atomic_inc(&EXT4_SB(sb)->s_msg_count);
911         if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
912                 return;
913
914         va_start(args, fmt);
915         vaf.fmt = fmt;
916         vaf.va = &args;
917         printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
918         va_end(args);
919 }
920
921 static int ext4_warning_ratelimit(struct super_block *sb)
922 {
923         atomic_inc(&EXT4_SB(sb)->s_warning_count);
924         return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
925                             "EXT4-fs warning");
926 }
927
928 void __ext4_warning(struct super_block *sb, const char *function,
929                     unsigned int line, const char *fmt, ...)
930 {
931         struct va_format vaf;
932         va_list args;
933
934         if (!ext4_warning_ratelimit(sb))
935                 return;
936
937         va_start(args, fmt);
938         vaf.fmt = fmt;
939         vaf.va = &args;
940         printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
941                sb->s_id, function, line, &vaf);
942         va_end(args);
943 }
944
945 void __ext4_warning_inode(const struct inode *inode, const char *function,
946                           unsigned int line, const char *fmt, ...)
947 {
948         struct va_format vaf;
949         va_list args;
950
951         if (!ext4_warning_ratelimit(inode->i_sb))
952                 return;
953
954         va_start(args, fmt);
955         vaf.fmt = fmt;
956         vaf.va = &args;
957         printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
958                "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
959                function, line, inode->i_ino, current->comm, &vaf);
960         va_end(args);
961 }
962
/*
 * __ext4_grp_locked_error - report corruption found while holding a
 * block-group lock.
 *
 * Called with the group's bitlock held.  For ERRORS_CONT the lock is
 * kept throughout; otherwise the group is unlocked around the superblock
 * commit and error handling, then re-locked before returning so the
 * caller's locking state is unchanged.
 */
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	/* Record in the in-memory superblock; committed to disk below. */
	__save_error_info(sb, EFSCORRUPTED, ino, block, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* Message assembled from several KERN_CONT pieces since
		 * the inode/block parts are optional. */
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (test_opt(sb, ERRORS_CONT)) {
		/* Async commit: we must not sleep while holding bitlock. */
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might what to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
1019
1020 void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
1021                                      ext4_group_t group,
1022                                      unsigned int flags)
1023 {
1024         struct ext4_sb_info *sbi = EXT4_SB(sb);
1025         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1026         struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
1027         int ret;
1028
1029         if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
1030                 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1031                                             &grp->bb_state);
1032                 if (!ret)
1033                         percpu_counter_sub(&sbi->s_freeclusters_counter,
1034                                            grp->bb_free);
1035         }
1036
1037         if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
1038                 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
1039                                             &grp->bb_state);
1040                 if (!ret && gdp) {
1041                         int count;
1042
1043                         count = ext4_free_inodes_count(sb, gdp);
1044                         percpu_counter_sub(&sbi->s_freeinodes_counter,
1045                                            count);
1046                 }
1047         }
1048 }
1049
/*
 * ext4_update_dynamic_rev - upgrade an EXT4_GOOD_OLD_REV superblock to
 * EXT4_DYNAMIC_REV in memory when a dynamic-rev feature is enabled.
 * No-op if the revision is already dynamic.
 */
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}
1074
1075 /*
1076  * Open the external journal device
1077  */
1078 static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
1079 {
1080         struct block_device *bdev;
1081
1082         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
1083         if (IS_ERR(bdev))
1084                 goto fail;
1085         return bdev;
1086
1087 fail:
1088         ext4_msg(sb, KERN_ERR,
1089                  "failed to open journal device unknown-block(%u,%u) %ld",
1090                  MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
1091         return NULL;
1092 }
1093
/*
 * Release the journal device opened by ext4_blkdev_get(); the mode flags
 * must match the ones used at open time.
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
1101
1102 static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
1103 {
1104         struct block_device *bdev;
1105         bdev = sbi->s_journal_bdev;
1106         if (bdev) {
1107                 ext4_blkdev_put(bdev);
1108                 sbi->s_journal_bdev = NULL;
1109         }
1110 }
1111
/* Map an s_orphan list node back to the VFS inode embedding it. */
static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
1116
1117 static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
1118 {
1119         struct list_head *l;
1120
1121         ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
1122                  le32_to_cpu(sbi->s_es->s_last_orphan));
1123
1124         printk(KERN_ERR "sb_info orphan list:\n");
1125         list_for_each(l, &sbi->s_orphan) {
1126                 struct inode *inode = orphan_list_entry(l);
1127                 printk(KERN_ERR "  "
1128                        "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
1129                        inode->i_sb->s_id, inode->i_ino, inode,
1130                        inode->i_mode, inode->i_nlink,
1131                        NEXT_ORPHAN(inode));
1132         }
1133 }
1134
#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

/* Turn off quotas for every quota type during unmount. */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
/* No-op when quota support is compiled out. */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif
1163
/*
 * ext4_put_super - tear down an ext4 superblock at unmount.
 *
 * The ordering below is deliberate: sysfs first, then quota, then the
 * journal, then the final superblock commit, then in-memory structures.
 */
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	struct flex_groups **flex_groups;
	int aborted = 0;
	int i, err;

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 * Unregister sysfs before flush sbi->s_error_work.
	 * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
	 * read metadata verify failed then will queue error work.
	 * flush_stashed_error_work will call start_this_handle may trigger
	 * BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		/* Don't escalate a failure on an already-aborted journal. */
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	/* Clean unmount: clear needs_recovery and record mount state. */
	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	/* s_group_desc and s_flex_groups are RCU-published arrays. */
	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
		ext4_blkdev_remove(sbi);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
1282
/* Slab cache backing struct ext4_inode_info allocations. */
static struct kmem_cache *ext4_inode_cachep;
1284
/*
 * Called inside transaction, so use GFP_NOFS
 *
 * Allocate an ext4_inode_info from the slab cache and initialize the
 * per-inode fields that are not handled by init_once(), returning the
 * embedded VFS inode (or NULL on allocation failure).
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	/* Preallocation (mballoc) state. */
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	atomic_set(&ei->i_prealloc_active, 0);
	spin_lock_init(&ei->i_prealloc_lock);
	/* Extent-status tree and its shrinker bookkeeping. */
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	/* Unwritten-extent conversion work, drained on sync/teardown. */
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	/* Fast-commit tracking state. */
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}
1325
/*
 * Decide whether the inode should be dropped (evicted) when its last
 * reference goes away; fscrypt gets a say when the generic check
 * declines.  The decision is traced before being returned.
 */
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}
1336
1337 static void ext4_free_in_core_inode(struct inode *inode)
1338 {
1339         fscrypt_free_inode(inode);
1340         if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
1341                 pr_warn("%s: inode %ld still in fc list",
1342                         __func__, inode->i_ino);
1343         }
1344         kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
1345 }
1346
1347 static void ext4_destroy_inode(struct inode *inode)
1348 {
1349         if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
1350                 ext4_msg(inode->i_sb, KERN_ERR,
1351                          "Inode %lu (%p): orphan list check failed!",
1352                          inode->i_ino, EXT4_I(inode));
1353                 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
1354                                 EXT4_I(inode), sizeof(struct ext4_inode_info),
1355                                 true);
1356                 dump_stack();
1357         }
1358
1359         if (EXT4_I(inode)->i_reserved_data_blocks)
1360                 ext4_msg(inode->i_sb, KERN_ERR,
1361                          "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
1362                          inode->i_ino, EXT4_I(inode),
1363                          EXT4_I(inode)->i_reserved_data_blocks);
1364 }
1365
/*
 * Slab constructor for ext4_inode_cache: runs once per object when the
 * slab page is created, so it may only initialize fields that remain
 * valid across alloc/free cycles (lists, rwsems, the VFS inode).
 */
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}
1377
1378 static int __init init_inodecache(void)
1379 {
1380         ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
1381                                 sizeof(struct ext4_inode_info), 0,
1382                                 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
1383                                         SLAB_ACCOUNT),
1384                                 offsetof(struct ext4_inode_info, i_data),
1385                                 sizeof_field(struct ext4_inode_info, i_data),
1386                                 init_once);
1387         if (ext4_inode_cachep == NULL)
1388                 return -ENOMEM;
1389         return 0;
1390 }
1391
/* Destroy the ext4 inode slab cache at module unload. */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}
1401
/*
 * ext4_clear_inode - per-inode teardown during eviction: fast-commit
 * removal, buffer/extent/quota cleanup, jbd2 inode release, and fscrypt
 * and fsverity state disposal.
 */
void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode, 0);
	/* Drop the whole extent-status tree for this inode. */
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}
1419
1420 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1421                                         u64 ino, u32 generation)
1422 {
1423         struct inode *inode;
1424
1425         /*
1426          * Currently we don't know the generation for parent directory, so
1427          * a generation of 0 means "accept any"
1428          */
1429         inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
1430         if (IS_ERR(inode))
1431                 return ERR_CAST(inode);
1432         if (generation && inode->i_generation != generation) {
1433                 iput(inode);
1434                 return ERR_PTR(-ESTALE);
1435         }
1436
1437         return inode;
1438 }
1439
/* NFS export op: decode a file handle into a dentry via ext4_nfs_get_inode. */
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
1446
/* NFS export op: decode a parent file handle via ext4_nfs_get_inode. */
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
1453
1454 static int ext4_nfs_commit_metadata(struct inode *inode)
1455 {
1456         struct writeback_control wbc = {
1457                 .sync_mode = WB_SYNC_ALL
1458         };
1459
1460         trace_ext4_nfs_commit_metadata(inode);
1461         return ext4_write_inode(inode, &wbc);
1462 }
1463
1464 /*
1465  * Try to release metadata pages (indirect blocks, directories) which are
1466  * mapped via the block device.  Since these pages could have journal heads
1467  * which would prevent try_to_free_buffers() from freeing them, we must use
1468  * jbd2 layer's try_to_free_buffers() function to release them.
1469  */
1470 static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
1471                                  gfp_t wait)
1472 {
1473         journal_t *journal = EXT4_SB(sb)->s_journal;
1474
1475         WARN_ON(PageChecked(page));
1476         if (!page_has_buffers(page))
1477                 return 0;
1478         if (journal)
1479                 return jbd2_journal_try_to_free_buffers(journal, page);
1480
1481         return try_to_free_buffers(page);
1482 }
1483
1484 #ifdef CONFIG_FS_ENCRYPTION
/* fscrypt op: read the inode's encryption context xattr into ctx. */
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}
1490
/*
 * fscrypt op: store an encryption context xattr on the inode, either
 * inside the caller's journal handle (inode-creation path) or in a
 * transaction of our own with ENOSPC retry (existing inode path).
 */
static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	/* DAX and encryption don't mix. */
	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EOPNOTSUPP;

	/* Inline data can't coexist with the encryption context xattr. */
	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode, false);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode, false);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}
1577
/* fscrypt op: return the test_dummy_encryption policy, if one was set. */
static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
{
	return EXT4_SB(sb)->s_dummy_enc_policy.policy;
}
1582
/* fscrypt op: report whether the stable_inodes feature flag is set. */
static bool ext4_has_stable_inodes(struct super_block *sb)
{
	return ext4_has_feature_stable_inodes(sb);
}
1587
/*
 * fscrypt op: report how many bits an inode number (width of
 * s_inodes_count) and a logical block number (width of ext4_lblk_t)
 * occupy on this filesystem.
 */
static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
	*lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}
1594
/* fscrypt hooks wired up for ext4 (only built with CONFIG_FS_ENCRYPTION). */
static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.get_dummy_policy	= ext4_get_dummy_policy,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= EXT4_NAME_LEN,
	.has_stable_inodes	= ext4_has_stable_inodes,
	.get_ino_and_lblk_bits	= ext4_get_ino_and_lblk_bits,
};
1605 #endif
1606
1607 #ifdef CONFIG_QUOTA
/* Human-readable quota type names (e.g. "user", "group") for messages. */
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

/* Quota hooks; all are defined later in this file. */
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
1626
/* Return the per-inode dquot pointer array for the VFS quota code. */
static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}
1631
/*
 * dquot callbacks: ext4-specific handlers (defined later in this file)
 * plus the generic dquot implementations for allocation and id lookup.
 */
static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};
1645
/* quotactl(2) entry points; only quota_on/quota_off are ext4-specific. */
static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
1656 #endif
1657
/* VFS super_operations for ext4. */
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};
1680
/* NFS export (file handle) operations. */
static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};
1687
/*
 * Mount option tokens, matched against the strings in the "tokens"
 * table below by the generic match_token() parser.  Opt_removed covers
 * options that are still accepted but no longer do anything.
 */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_prefetch_block_bitmaps,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};
1717
/*
 * Option-string -> token table for match_token().  A token may have
 * several spellings (e.g. "grpid"/"bsdgroups"); %u/%s patterns capture
 * a numeric or string argument into args[].
 */
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_dax_always, "dax=always"},
	{Opt_dax_inode, "dax=inode"},
	{Opt_dax_never, "dax=never"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_warn_on_error, "warn_on_error"},
	{Opt_nowarn_on_error, "nowarn_on_error"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "nodioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, "fc_debug_force"},
	{Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
#endif
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"},
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"},	/* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};
1820
1821 static ext4_fsblk_t get_sb_block(void **data)
1822 {
1823         ext4_fsblk_t    sb_block;
1824         char            *options = (char *) *data;
1825
1826         if (!options || strncmp(options, "sb=", 3) != 0)
1827                 return 1;       /* Default location */
1828
1829         options += 3;
1830         /* TODO: use simple_strtoll with >32bit ext4 */
1831         sb_block = simple_strtoul(options, &options, 0);
1832         if (*options && *options != ',') {
1833                 printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1834                        (char *) *data);
1835                 return 1;
1836         }
1837         if (*options == ',')
1838                 options++;
1839         *data = (void *) options;
1840
1841         return sb_block;
1842 }
1843
/* Default journal commit I/O priority: best-effort class, level 3. */
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
/* Format string for warning about soon-to-be-removed mount options. */
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1848
1849 #ifdef CONFIG_QUOTA
/*
 * set_qf_name - record the journaled quota file name for a quota type
 * @sb:    superblock being mounted or remounted
 * @qtype: quota type (USRQUOTA or GRPQUOTA)
 * @args:  parsed mount-option argument holding the file name
 *
 * Returns 1 on success (including the no-op cases), -1 on error.
 * Mount-option convention: positive = handled, negative = reject mount.
 */
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
	int ret = -1;

	/* A name may not be introduced while quotas are already active. */
	if (sb_any_quota_loaded(sb) && !old_qname) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	/* With the QUOTA feature, quota files are hidden inodes instead. */
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (old_qname) {
		/* Re-specifying the same name is fine; a new one is not. */
		if (strcmp(old_qname, qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	/* Publish the name; readers access s_qf_names[] under RCU. */
	rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}
1894
/*
 * clear_qf_name - drop a previously recorded journaled quota file name
 * ("usrjquota=" / "grpjquota=" given with an empty value).
 *
 * Returns 1 on success, -1 if that quota type is currently active.
 */
static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *old_qname = get_qf_name(sb, sbi, qtype);

	if (sb_any_quota_loaded(sb) && old_qname) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
	/* Wait for all RCU readers of the old name before freeing it. */
	synchronize_rcu();
	kfree(old_qname);
	return 1;
}
1911 #endif
1912
/* Flags telling handle_mount_opt() how to process each mount option. */
#define MOPT_SET	0x0001	/* set the mount_opt bits */
#define MOPT_CLEAR	0x0002	/* clear the mount_opt bits */
#define MOPT_NOSUPPORT	0x0004	/* recognized but not supported */
#define MOPT_EXPLICIT	0x0008	/* remember option was given explicitly */
#define MOPT_CLEAR_ERR	0x0010	/* reset errors=... behaviour bits first */
#define MOPT_GTE0	0x0020	/* numeric argument must be >= 0 */
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040	/* journaled quota format selection */
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080	/* data=... journalling mode */
#define MOPT_NO_EXT2	0x0100	/* invalid when mounted as ext2 */
#define MOPT_NO_EXT3	0x0200	/* invalid when mounted as ext3 */
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400	/* takes a string, not an int, argument */
#define MOPT_SKIP	0x0800	/* NOTE(review): appears to mark options handled out of line (dax) - confirm */
#define MOPT_2		0x1000	/* mount_opt is an EXT4_MOUNT2_* flag (s_mount_opt2) - confirm */
1933
/*
 * Per-token handling descriptors consulted by handle_mount_opt():
 * which mount_opt bit(s) the token affects and MOPT_* flags saying how.
 * Tokens with mount_opt == 0 carry only an argument (handled specially).
 */
static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_NO_EXT2},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
	{Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_STRING},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
	{Opt_fc_debug_max_replay, 0, MOPT_GTE0},
#endif
	{Opt_err, 0, 0}
};
2041
2042 #ifdef CONFIG_UNICODE
/*
 * Casefolding encodings supported on disk: superblock magic number ->
 * human-readable name and Unicode version.
 */
static const struct ext4_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} ext4_sb_encoding_map[] = {
	{EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
};
2050
2051 static int ext4_sb_read_encoding(const struct ext4_super_block *es,
2052                                  const struct ext4_sb_encodings **encoding,
2053                                  __u16 *flags)
2054 {
2055         __u16 magic = le16_to_cpu(es->s_encoding);
2056         int i;
2057
2058         for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
2059                 if (magic == ext4_sb_encoding_map[i].magic)
2060                         break;
2061
2062         if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
2063                 return -EINVAL;
2064
2065         *encoding = &ext4_sb_encoding_map[i];
2066         *flags = le16_to_cpu(es->s_encoding_flags);
2067
2068         return 0;
2069 }
2070 #endif
2071
/*
 * Handle the "test_dummy_encryption" mount option by configuring a
 * dummy fscrypt policy through fscrypt_set_test_dummy_encryption().
 * Testing only; requires the encrypt superblock feature.
 *
 * Returns 1 on success, -1 on failure (mount-option convention).
 */
static int ext4_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	if (!ext4_has_feature_encrypt(sb)) {
		ext4_msg(sb, KERN_WARNING,
			 "test_dummy_encryption requires encrypt feature");
		return -1;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !sbi->s_dummy_enc_policy.policy) {
		ext4_msg(sb, KERN_WARNING,
			 "Can't set test_dummy_encryption on remount");
		return -1;
	}
	err = fscrypt_set_test_dummy_encryption(sb, arg->from,
						&sbi->s_dummy_enc_policy);
	if (err) {
		/* -EEXIST: a different dummy policy is already in force. */
		if (err == -EEXIST)
			ext4_msg(sb, KERN_WARNING,
				 "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			ext4_msg(sb, KERN_WARNING,
				 "Value of option \"%s\" is unrecognized", opt);
		else
			ext4_msg(sb, KERN_WARNING,
				 "Error processing option \"%s\" [%d]",
				 opt, err);
		return -1;
	}
	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
	return 1;
#else
	/* Option is meaningless without CONFIG_FS_ENCRYPTION. */
	ext4_msg(sb, KERN_WARNING,
		 "test_dummy_encryption option not supported");
	return -1;

#endif
}
2122
2123 static int handle_mount_opt(struct super_block *sb, char *opt, int token,
2124                             substring_t *args, unsigned long *journal_devnum,
2125                             unsigned int *journal_ioprio, int is_remount)
2126 {
2127         struct ext4_sb_info *sbi = EXT4_SB(sb);
2128         const struct mount_opts *m;
2129         kuid_t uid;
2130         kgid_t gid;
2131         int arg = 0;
2132
2133 #ifdef CONFIG_QUOTA
2134         if (token == Opt_usrjquota)
2135                 return set_qf_name(sb, USRQUOTA, &args[0]);
2136         else if (token == Opt_grpjquota)
2137                 return set_qf_name(sb, GRPQUOTA, &args[0]);
2138         else if (token == Opt_offusrjquota)
2139                 return clear_qf_name(sb, USRQUOTA);
2140         else if (token == Opt_offgrpjquota)
2141                 return clear_qf_name(sb, GRPQUOTA);
2142 #endif
2143         switch (token) {
2144         case Opt_noacl:
2145         case Opt_nouser_xattr:
2146                 ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
2147                 break;
2148         case Opt_sb:
2149                 return 1;       /* handled by get_sb_block() */
2150         case Opt_removed:
2151                 ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
2152                 return 1;
2153         case Opt_abort:
2154                 ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
2155                 return 1;
2156         case Opt_i_version:
2157                 sb->s_flags |= SB_I_VERSION;
2158                 return 1;
2159         case Opt_lazytime:
2160                 sb->s_flags |= SB_LAZYTIME;
2161                 return 1;
2162         case Opt_nolazytime:
2163                 sb->s_flags &= ~SB_LAZYTIME;
2164                 return 1;
2165         case Opt_inlinecrypt:
2166 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
2167                 sb->s_flags |= SB_INLINECRYPT;
2168 #else
2169                 ext4_msg(sb, KERN_ERR, "inline encryption not supported");
2170 #endif
2171                 return 1;
2172         }
2173
2174         for (m = ext4_mount_opts; m->token != Opt_err; m++)
2175                 if (token == m->token)
2176                         break;
2177
2178         if (m->token == Opt_err) {
2179                 ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
2180                          "or missing value", opt);
2181                 return -1;
2182         }
2183
2184         if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
2185                 ext4_msg(sb, KERN_ERR,
2186                          "Mount option \"%s\" incompatible with ext2", opt);
2187                 return -1;
2188         }
2189         if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
2190                 ext4_msg(sb, KERN_ERR,
2191                          "Mount option \"%s\" incompatible with ext3", opt);
2192                 return -1;
2193         }
2194
2195         if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
2196                 return -1;
2197         if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
2198                 return -1;
2199         if (m->flags & MOPT_EXPLICIT) {
2200                 if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
2201                         set_opt2(sb, EXPLICIT_DELALLOC);
2202                 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
2203                         set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
2204                 } else
2205                         return -1;
2206         }
2207         if (m->flags & MOPT_CLEAR_ERR)
2208                 clear_opt(sb, ERRORS_MASK);
2209         if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
2210                 ext4_msg(sb, KERN_ERR, "Cannot change quota "
2211                          "options when quota turned on");
2212                 return -1;
2213         }
2214
2215         if (m->flags & MOPT_NOSUPPORT) {
2216                 ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
2217         } else if (token == Opt_commit) {
2218                 if (arg == 0)
2219                         arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
2220                 else if (arg > INT_MAX / HZ) {
2221                         ext4_msg(sb, KERN_ERR,
2222                                  "Invalid commit interval %d, "
2223                                  "must be smaller than %d",
2224                                  arg, INT_MAX / HZ);
2225                         return -1;
2226                 }
2227                 sbi->s_commit_interval = HZ * arg;
2228         } else if (token == Opt_debug_want_extra_isize) {
2229                 if ((arg & 1) ||
2230                     (arg < 4) ||
2231                     (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
2232                         ext4_msg(sb, KERN_ERR,
2233                                  "Invalid want_extra_isize %d", arg);
2234                         return -1;
2235                 }
2236                 sbi->s_want_extra_isize = arg;
2237         } else if (token == Opt_max_batch_time) {
2238                 sbi->s_max_batch_time = arg;
2239         } else if (token == Opt_min_batch_time) {
2240                 sbi->s_min_batch_time = arg;
2241         } else if (token == Opt_inode_readahead_blks) {
2242                 if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
2243                         ext4_msg(sb, KERN_ERR,
2244                                  "EXT4-fs: inode_readahead_blks must be "
2245                                  "0 or a power of 2 smaller than 2^31");
2246                         return -1;
2247                 }
2248                 sbi->s_inode_readahead_blks = arg;
2249         } else if (token == Opt_init_itable) {
2250                 set_opt(sb, INIT_INODE_TABLE);
2251                 if (!args->from)
2252                         arg = EXT4_DEF_LI_WAIT_MULT;
2253                 sbi->s_li_wait_mult = arg;
2254         } else if (token == Opt_max_dir_size_kb) {
2255                 sbi->s_max_dir_size_kb = arg;
2256 #ifdef CONFIG_EXT4_DEBUG
2257         } else if (token == Opt_fc_debug_max_replay) {
2258                 sbi->s_fc_debug_max_replay = arg;
2259 #endif
2260         } else if (token == Opt_stripe) {
2261                 sbi->s_stripe = arg;
2262         } else if (token == Opt_resuid) {
2263                 uid = make_kuid(current_user_ns(), arg);
2264                 if (!uid_valid(uid)) {
2265                         ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
2266                         return -1;
2267                 }
2268                 sbi->s_resuid = uid;
2269         } else if (token == Opt_resgid) {
2270                 gid = make_kgid(current_user_ns(), arg);
2271                 if (!gid_valid(gid)) {
2272                         ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
2273                         return -1;
2274                 }
2275                 sbi->s_resgid = gid;
2276         } else if (token == Opt_journal_dev) {
2277                 if (is_remount) {
2278                         ext4_msg(sb, KERN_ERR,
2279                                  "Cannot specify journal on remount");
2280                         return -1;
2281                 }
2282                 *journal_devnum = arg;
2283         } else if (token == Opt_journal_path) {
2284                 char *journal_path;
2285                 struct inode *journal_inode;
2286                 struct path path;
2287                 int error;
2288
2289                 if (is_remount) {
2290                         ext4_msg(sb, KERN_ERR,
2291                                  "Cannot specify journal on remount");
2292                         return -1;
2293                 }
2294                 journal_path = match_strdup(&args[0]);
2295                 if (!journal_path) {
2296                         ext4_msg(sb, KERN_ERR, "error: could not dup "
2297                                 "journal device string");
2298                         return -1;
2299                 }
2300
2301                 error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
2302                 if (error) {
2303                         ext4_msg(sb, KERN_ERR, "error: could not find "
2304                                 "journal device path: error %d", error);
2305                         kfree(journal_path);
2306                         return -1;
2307                 }
2308
2309                 journal_inode = d_inode(path.dentry);
2310                 if (!S_ISBLK(journal_inode->i_mode)) {
2311                         ext4_msg(sb, KERN_ERR, "error: journal path %s "
2312                                 "is not a block device", journal_path);
2313                         path_put(&path);
2314                         kfree(journal_path);
2315                         return -1;
2316                 }
2317
2318                 *journal_devnum = new_encode_dev(journal_inode->i_rdev);
2319                 path_put(&path);
2320                 kfree(journal_path);
2321         } else if (token == Opt_journal_ioprio) {
2322                 if (arg > 7) {
2323                         ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
2324                                  " (must be 0-7)");
2325                         return -1;
2326                 }
2327                 *journal_ioprio =
2328                         IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
2329         } else if (token == Opt_test_dummy_encryption) {
2330                 return ext4_set_test_dummy_encryption(sb, opt, &args[0],
2331                                                       is_remount);
2332         } else if (m->flags & MOPT_DATAJ) {
2333                 if (is_remount) {
2334                         if (!sbi->s_journal)
2335                                 ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
2336                         else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
2337                                 ext4_msg(sb, KERN_ERR,
2338                                          "Cannot change data mode on remount");
2339                                 return -1;
2340                         }
2341                 } else {
2342                         clear_opt(sb, DATA_FLAGS);
2343                         sbi->s_mount_opt |= m->mount_opt;
2344                 }
2345 #ifdef CONFIG_QUOTA
2346         } else if (m->flags & MOPT_QFMT) {
2347                 if (sb_any_quota_loaded(sb) &&
2348                     sbi->s_jquota_fmt != m->mount_opt) {
2349                         ext4_msg(sb, KERN_ERR, "Cannot change journaled "
2350                                  "quota options when quota turned on");
2351                         return -1;
2352                 }
2353                 if (ext4_has_feature_quota(sb)) {
2354                         ext4_msg(sb, KERN_INFO,
2355                                  "Quota format mount options ignored "
2356                                  "when QUOTA feature is enabled");
2357                         return 1;
2358                 }
2359                 sbi->s_jquota_fmt = m->mount_opt;
2360 #endif
2361         } else if (token == Opt_dax || token == Opt_dax_always ||
2362                    token == Opt_dax_inode || token == Opt_dax_never) {
2363 #ifdef CONFIG_FS_DAX
2364                 switch (token) {
2365                 case Opt_dax:
2366                 case Opt_dax_always:
2367                         if (is_remount &&
2368                             (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2369                              (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
2370                         fail_dax_change_remount:
2371                                 ext4_msg(sb, KERN_ERR, "can't change "
2372                                          "dax mount option while remounting");
2373                                 return -1;
2374                         }
2375                         if (is_remount &&
2376                             (test_opt(sb, DATA_FLAGS) ==
2377                              EXT4_MOUNT_JOURNAL_DATA)) {
2378                                     ext4_msg(sb, KERN_ERR, "can't mount with "
2379                                              "both data=journal and dax");
2380                                     return -1;
2381                         }
2382                         ext4_msg(sb, KERN_WARNING,
2383                                 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
2384                         sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
2385                         sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
2386                         break;
2387                 case Opt_dax_never:
2388                         if (is_remount &&
2389                             (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2390                              (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
2391                                 goto fail_dax_change_remount;
2392                         sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
2393                         sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2394                         break;
2395                 case Opt_dax_inode:
2396                         if (is_remount &&
2397                             ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2398                              (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2399                              !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
2400                                 goto fail_dax_change_remount;
2401                         sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2402                         sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
2403                         /* Strictly for printing options */
2404                         sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
2405                         break;
2406                 }
2407 #else
2408                 ext4_msg(sb, KERN_INFO, "dax option not supported");
2409                 sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
2410                 sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2411                 return -1;
2412 #endif
2413         } else if (token == Opt_data_err_abort) {
2414                 sbi->s_mount_opt |= m->mount_opt;
2415         } else if (token == Opt_data_err_ignore) {
2416                 sbi->s_mount_opt &= ~m->mount_opt;
2417         } else {
2418                 if (!args->from)
2419                         arg = 1;
2420                 if (m->flags & MOPT_CLEAR)
2421                         arg = !arg;
2422                 else if (unlikely(!(m->flags & MOPT_SET))) {
2423                         ext4_msg(sb, KERN_WARNING,
2424                                  "buggy handling of option %s", opt);
2425                         WARN_ON(1);
2426                         return -1;
2427                 }
2428                 if (m->flags & MOPT_2) {
2429                         if (arg != 0)
2430                                 sbi->s_mount_opt2 |= m->mount_opt;
2431                         else
2432                                 sbi->s_mount_opt2 &= ~m->mount_opt;
2433                 } else {
2434                         if (arg != 0)
2435                                 sbi->s_mount_opt |= m->mount_opt;
2436                         else
2437                                 sbi->s_mount_opt &= ~m->mount_opt;
2438                 }
2439         }
2440         return 1;
2441 }
2442
/*
 * Parse a comma-separated mount option string and apply each recognized
 * option to the in-core superblock state via handle_mount_opt().
 *
 * @options:        mutable copy of the option string; consumed by strsep()
 * @sb:             superblock being mounted or remounted
 * @journal_devnum: out: device number parsed from journal_dev/journal_path
 * @journal_ioprio: out: I/O priority parsed from journal_ioprio
 * @is_remount:     non-zero when invoked from the remount path
 *
 * Returns 1 on success, 0 on any parse or consistency failure (an error
 * message has already been emitted with ext4_msg()).
 */
static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
	grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
	if (usr_qf_name || grp_qf_name) {
		/*
		 * A journaled quota file makes the corresponding old-style
		 * option redundant; clear it so the check below only trips
		 * on a genuine old/new format conflict.
		 */
		if (test_opt(sb, USRQUOTA) && usr_qf_name)
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && grp_qf_name)
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		/* blocksize comes from the on-disk superblock's log field */
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
		if (blocksize < PAGE_SIZE)
			ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
				 "experimental mount option 'dioread_nolock' "
				 "for blocksize < PAGE_SIZE");
	}
	return 1;
}
2512
/*
 * Append the quota-related mount options (journaled quota format and the
 * user/group quota file names) to @seq.  Compiled out when CONFIG_QUOTA
 * is not enabled.
 */
static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *usr_qf_name, *grp_qf_name;

	/* Journaled quota format, when one has been configured. */
	if (sbi->s_jquota_fmt) {
		char *fmtname;

		if (sbi->s_jquota_fmt == QFMT_VFS_OLD)
			fmtname = "vfsold";
		else if (sbi->s_jquota_fmt == QFMT_VFS_V0)
			fmtname = "vfsv0";
		else if (sbi->s_jquota_fmt == QFMT_VFS_V1)
			fmtname = "vfsv1";
		else
			fmtname = "";
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	/*
	 * The quota file names are RCU-protected (remount can replace
	 * them), so they must be read inside a read-side critical section.
	 */
	rcu_read_lock();
	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (usr_qf_name)
		seq_show_option(seq, "usrjquota", usr_qf_name);
	if (grp_qf_name)
		seq_show_option(seq, "grpjquota", grp_qf_name);
	rcu_read_unlock();
#endif
}
2547
2548 static const char *token2str(int token)
2549 {
2550         const struct match_token *t;
2551
2552         for (t = tokens; t->token != Opt_err; t++)
2553                 if (t->token == token && !strchr(t->pattern, '='))
2554                         break;
2555         return t->pattern;
2556 }
2557
2558 /*
2559  * Show an option if
2560  *  - it's set to a non-default value OR
2561  *  - if the per-sb default is different from the global default
2562  */
2563 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2564                               int nodefs)
2565 {
2566         struct ext4_sb_info *sbi = EXT4_SB(sb);
2567         struct ext4_super_block *es = sbi->s_es;
2568         int def_errors, def_mount_opt = sbi->s_def_mount_opt;
2569         const struct mount_opts *m;
2570         char sep = nodefs ? '\n' : ',';
2571
2572 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2573 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2574
2575         if (sbi->s_sb_block != 1)
2576                 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2577
2578         for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2579                 int want_set = m->flags & MOPT_SET;
2580                 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2581                     (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
2582                         continue;
2583                 if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
2584                         continue; /* skip if same as the default */
2585                 if ((want_set &&
2586                      (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
2587                     (!want_set && (sbi->s_mount_opt & m->mount_opt)))
2588                         continue; /* select Opt_noFoo vs Opt_Foo */
2589                 SEQ_OPTS_PRINT("%s", token2str(m->token));
2590         }
2591
2592         if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
2593             le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
2594                 SEQ_OPTS_PRINT("resuid=%u",
2595                                 from_kuid_munged(&init_user_ns, sbi->s_resuid));
2596         if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
2597             le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
2598                 SEQ_OPTS_PRINT("resgid=%u",
2599                                 from_kgid_munged(&init_user_ns, sbi->s_resgid));
2600         def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
2601         if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
2602                 SEQ_OPTS_PUTS("errors=remount-ro");
2603         if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
2604                 SEQ_OPTS_PUTS("errors=continue");
2605         if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
2606                 SEQ_OPTS_PUTS("errors=panic");
2607         if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
2608                 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
2609         if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
2610                 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2611         if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2612                 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2613         if (sb->s_flags & SB_I_VERSION)
2614                 SEQ_OPTS_PUTS("i_version");
2615         if (nodefs || sbi->s_stripe)
2616                 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
2617         if (nodefs || EXT4_MOUNT_DATA_FLAGS &
2618                         (sbi->s_mount_opt ^ def_mount_opt)) {
2619                 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
2620                         SEQ_OPTS_PUTS("data=journal");
2621                 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
2622                         SEQ_OPTS_PUTS("data=ordered");
2623                 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
2624                         SEQ_OPTS_PUTS("data=writeback");
2625         }
2626         if (nodefs ||
2627             sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
2628                 SEQ_OPTS_PRINT("inode_readahead_blks=%u",
2629                                sbi->s_inode_readahead_blks);
2630
2631         if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
2632                        (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
2633                 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
2634         if (nodefs || sbi->s_max_dir_size_kb)
2635                 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
2636         if (test_opt(sb, DATA_ERR_ABORT))
2637                 SEQ_OPTS_PUTS("data_err=abort");
2638
2639         fscrypt_show_test_dummy_encryption(seq, sep, sb);
2640
2641         if (sb->s_flags & SB_INLINECRYPT)
2642                 SEQ_OPTS_PUTS("inlinecrypt");
2643
2644         if (test_opt(sb, DAX_ALWAYS)) {
2645                 if (IS_EXT2_SB(sb))
2646                         SEQ_OPTS_PUTS("dax");
2647                 else
2648                         SEQ_OPTS_PUTS("dax=always");
2649         } else if (test_opt2(sb, DAX_NEVER)) {
2650                 SEQ_OPTS_PUTS("dax=never");
2651         } else if (test_opt2(sb, DAX_INODE)) {
2652                 SEQ_OPTS_PUTS("dax=inode");
2653         }
2654         ext4_show_quota_options(seq, sb);
2655         return 0;
2656 }
2657
2658 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
2659 {
2660         return _ext4_show_options(seq, root->d_sb, 0);
2661 }
2662
2663 int ext4_seq_options_show(struct seq_file *seq, void *offset)
2664 {
2665         struct super_block *sb = seq->private;
2666         int rc;
2667
2668         seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
2669         rc = _ext4_show_options(seq, sb, 1);
2670         seq_puts(seq, "\n");
2671         return rc;
2672 }
2673
/*
 * Final mount-time superblock setup: sanity-warn about an unchecked or
 * errored filesystem, bump the mount count/mtime, mark the journal as
 * needing recovery, and write the superblock back.
 *
 * Returns 0 on success, -EROFS when the revision level is unsupported
 * (caller is expected to force read-only), or the error from
 * ext4_commit_super().  On a read-only mount everything except the
 * revision check and the debug printout is skipped.
 */
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	/* Advisory warnings only -- none of these block the mount. */
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	/*
	 * Without a journal there is no recovery marker, so clear VALID_FS
	 * now; it is set again on clean unmount.
	 */
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}
2730
/*
 * Grow the two-level sbi->s_flex_groups array so that it can hold the
 * flex group containing block group @ngroup - 1.  The top-level pointer
 * array is rebuilt and published with RCU; the per-flex-group structs
 * are allocated individually so growth never has to move them.
 *
 * Returns 0 on success (including when the array is already big enough
 * or flex_bg is not in use), -ENOMEM on allocation failure.
 */
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	/* Number of flex groups needed to cover groups 0..ngroup-1. */
	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			/* unwind only the entries allocated in this call */
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	/* Copy the existing per-flex-group pointers into the new array. */
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	/* Publish the new array, then free the old one after a grace period. */
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}
2777
/*
 * Initialize the in-core flex group accounting (free inodes, free
 * clusters, used directories) by summing the per-group descriptor
 * counts into each flex group's counters.
 *
 * Returns 1 on success, and also when flex_bg is effectively disabled
 * (s_log_groups_per_flex out of the 1..31 range -- it is zeroed so the
 * rest of the code treats the fs as non-flex).  Returns 0 only when the
 * flex group array could not be allocated.
 */
static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct flex_groups *fg;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	/* Accumulate each group's counters into its flex group. */
	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &fg->free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
	}

	return 1;
failed:
	return 0;
}
2811
/*
 * Compute the checksum of a group descriptor.  Two algorithms exist:
 * the metadata_csum crc32c-based scheme (truncated to 16 bits), and the
 * legacy gdt_csum crc16 scheme.  Both checksum the group number plus
 * the descriptor contents with the bg_checksum field itself excluded.
 *
 * Returns the little-endian checksum, or 0 if neither checksum feature
 * is enabled.
 */
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		/* feed zeroes in place of the stored checksum field */
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		/* checksum the tail of the (possibly 64-bit) descriptor */
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		/* truncate crc32c to fit the 16-bit on-disk field */
		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	/* legacy scheme seeds with the filesystem UUID, not s_csum_seed */
	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
2857
2858 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
2859                                 struct ext4_group_desc *gdp)
2860 {
2861         if (ext4_has_group_desc_csum(sb) &&
2862             (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
2863                 return 0;
2864
2865         return 1;
2866 }
2867
2868 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2869                               struct ext4_group_desc *gdp)
2870 {
2871         if (!ext4_has_group_desc_csum(sb))
2872                 return;
2873         gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
2874 }
2875
/* Called at mount-time, super-block is locked */
/*
 * Validate every group descriptor: the block bitmap, inode bitmap and
 * inode table of each group must lie inside the group and must not
 * overlap the superblock or the group descriptor blocks, and the
 * descriptor checksum must verify.
 *
 * On a read-write mount any problem fails the mount (returns 0); on a
 * read-only mount the overlap/checksum problems are only logged so that
 * a damaged filesystem can still be mounted for e2fsck.  Out-of-group
 * bitmaps/tables always fail, even read-only.
 *
 * @first_not_zeroed: if non-NULL, receives the first group whose inode
 * table has not yet been zeroed (for the lazy-init thread).
 *
 * Returns 1 when the descriptors are acceptable, 0 otherwise.
 */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	/* last block occupied by the primary group descriptors */
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		/*
		 * With flex_bg, metadata may live anywhere in the fs, so
		 * the "in group" range is the whole filesystem.
		 */
		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		/* remember the first group still needing itable zeroing */
		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		/* the inode table spans s_itb_per_group blocks */
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		/* group lock serializes against concurrent csum updates */
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}
2995
2996 /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2997  * the superblock) which were deleted from all directories, but held open by
2998  * a process at the time of a crash.  We walk the list and try to delete these
2999  * inodes at recovery time (only with a read-write filesystem).
3000  *
3001  * In order to keep the orphan inode chain consistent during traversal (in
3002  * case of crash during recovery), we link each inode into the superblock
3003  * orphan list_head and handle it the same way as an inode deletion during
3004  * normal operation (which journals the operations for us).
3005  *
3006  * We only do an iget() and an iput() on each inode, which is very safe if we
3007  * accidentally point at an in-use or already deleted inode.  The worst that
3008  * can happen in this case is that we get a "bit already cleared" message from
3009  * ext4_free_inode().  The only reason we would point at a wrong inode is if
3010  * e2fsck was run on this filesystem, and it must have already done the orphan
3011  * inode cleanup for us, so we can safely abort without any further action.
3012  */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	/* Saved so the original SB_RDONLY state can be restored at exit. */
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	/* Orphan processing writes to the device; bail if it is RO hardware. */
	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	/*
	 * Temporarily lift SB_RDONLY so orphans can be processed; the
	 * saved s_flags value is written back at the end of this function.
	 */
	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old sytle */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	/* Walk the on-disk orphan chain; each pass consumes the list head. */
	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			/* Still linked: finish an interrupted truncate. */
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret) {
				/*
				 * We need to clean up the in-core orphan list
				 * manually if ext4_truncate() failed to get a
				 * transaction handle.
				 */
				ext4_orphan_del(NULL, inode);
				ext4_std_error(inode->i_sb, ret);
			}
			inode_unlock(inode);
			nr_truncates++;
		} else {
			/* Unlinked but was held open: delete it now. */
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}
3158
3159 /*
3160  * Maximal extent format file size.
3161  * Resulting logical blkno at s_maxbytes must fit in our on-disk
3162  * extent format containers, within a sector_t, and within i_blocks
3163  * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
3164  * so that won't be a limiting factor.
3165  *
3166  * However there is other limiting factor. We do store extents in the form
3167  * of starting block and length, hence the resulting length of the extent
3168  * covering maximum file size must fit into on-disk format containers as
3169  * well. Given that length is always by 1 unit bigger than max unit (because
3170  * we count 0 as well) we have to lower the s_maxbytes by one fs block.
3171  *
3172  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
3173  */
3174 static loff_t ext4_max_size(int blkbits, int has_huge_files)
3175 {
3176         loff_t res;
3177         loff_t upper_limit = MAX_LFS_FILESIZE;
3178
3179         BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
3180
3181         if (!has_huge_files) {
3182                 upper_limit = (1LL << 32) - 1;
3183
3184                 /* total blocks in file system block size */
3185                 upper_limit >>= (blkbits - 9);
3186                 upper_limit <<= blkbits;
3187         }
3188
3189         /*
3190          * 32-bit extent-start container, ee_block. We lower the maxbytes
3191          * by one fs block, so ee_len can cover the extent of maximum file
3192          * size
3193          */
3194         res = (1LL << 32) - 1;
3195         res <<= blkbits;
3196
3197         /* Sanity check against vm- & vfs- imposed limits */
3198         if (res > upper_limit)
3199                 res = upper_limit;
3200
3201         return res;
3202 }
3203
3204 /*
3205  * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
3206  * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3207  * We need to be 1 filesystem block less than the 2^48 sector limit.
3208  */
3209 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3210 {
3211         unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
3212         int meta_blocks;
3213
3214         /*
3215          * This is calculated to be the largest file size for a dense, block
3216          * mapped file such that the file's total number of 512-byte sectors,
3217          * including data and all indirect blocks, does not exceed (2^48 - 1).
3218          *
3219          * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
3220          * number of 512-byte sectors of the file.
3221          */
3222         if (!has_huge_files) {
3223                 /*
3224                  * !has_huge_files or implies that the inode i_block field
3225                  * represents total file blocks in 2^32 512-byte sectors ==
3226                  * size of vfs inode i_blocks * 8
3227                  */
3228                 upper_limit = (1LL << 32) - 1;
3229
3230                 /* total blocks in file system block size */
3231                 upper_limit >>= (bits - 9);
3232
3233         } else {
3234                 /*
3235                  * We use 48 bit ext4_inode i_blocks
3236                  * With EXT4_HUGE_FILE_FL set the i_blocks
3237                  * represent total number of blocks in
3238                  * file system block size
3239                  */
3240                 upper_limit = (1LL << 48) - 1;
3241
3242         }
3243
3244         /* indirect blocks */
3245         meta_blocks = 1;
3246         /* double indirect blocks */
3247         meta_blocks += 1 + (1LL << (bits-2));
3248         /* tripple indirect blocks */
3249         meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
3250
3251         upper_limit -= meta_blocks;
3252         upper_limit <<= bits;
3253
3254         res += 1LL << (bits-2);
3255         res += 1LL << (2*(bits-2));
3256         res += 1LL << (3*(bits-2));
3257         res <<= bits;
3258         if (res > upper_limit)
3259                 res = upper_limit;
3260
3261         if (res > MAX_LFS_FILESIZE)
3262                 res = MAX_LFS_FILESIZE;
3263
3264         return (loff_t)res;
3265 }
3266
3267 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3268                                    ext4_fsblk_t logical_sb_block, int nr)
3269 {
3270         struct ext4_sb_info *sbi = EXT4_SB(sb);
3271         ext4_group_t bg, first_meta_bg;
3272         int has_super = 0;
3273
3274         first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3275
3276         if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3277                 return logical_sb_block + nr + 1;
3278         bg = sbi->s_desc_per_block * nr;
3279         if (ext4_bg_has_super(sb, bg))
3280                 has_super = 1;
3281
3282         /*
3283          * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3284          * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
3285          * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3286          * compensate.
3287          */
3288         if (sb->s_blocksize == 1024 && nr == 0 &&
3289             le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3290                 has_super++;
3291
3292         return (has_super + ext4_group_first_block_no(sb, bg));
3293 }
3294
3295 /**
3296  * ext4_get_stripe_size: Get the stripe size.
3297  * @sbi: In memory super block info
3298  *
3299  * If we have specified it via mount option, then
3300  * use the mount option value. If the value specified at mount time is
3301  * greater than the blocks per group use the super block value.
3302  * If the super block value is greater than blocks per group return 0.
3303  * Allocator needs it be less than blocks per group.
3304  *
3305  */
3306 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
3307 {
3308         unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
3309         unsigned long stripe_width =
3310                         le32_to_cpu(sbi->s_es->s_raid_stripe_width);
3311         int ret;
3312
3313         if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
3314                 ret = sbi->s_stripe;
3315         else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
3316                 ret = stripe_width;
3317         else if (stride && stride <= sbi->s_blocks_per_group)
3318                 ret = stride;
3319         else
3320                 ret = 0;
3321
3322         /*
3323          * If the stripe width is 1, this makes no sense and
3324          * we set it to 0 to turn off stripe handling code.
3325          */
3326         if (ret <= 1)
3327                 ret = 0;
3328
3329         return ret;
3330 }
3331
3332 /*
3333  * Check whether this filesystem can be mounted based on
3334  * the features present and the RDONLY/RDWR mount requested.
3335  * Returns 1 if this filesystem can be mounted as requested,
3336  * 0 if it cannot be.
3337  */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	/* Unknown incompat features make the fs unsafe for any access. */
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#ifndef CONFIG_UNICODE
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	/* Everything below only matters for a read-write mount. */
	if (readonly)
		return 1;

	/* The on-disk readonly feature forces the mount to read-only. */
	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}
3392
3393 /*
3394  * This function is called once a day if we have errors logged
3395  * on the file system
3396  */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	/*
	 * The inode/block details are appended with KERN_CONT, so the
	 * printk calls below must stay consecutive to form one line.
	 */
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	/* Re-arm ourselves so the report repeats while errors persist. */
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
3439
3440 /* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned int prefetch_ios = 0;
	int ret = 0;	/* 0 = more work pending, nonzero = request done */
	u64 start_time;

	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group,
				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
		if (prefetch_ios)
			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
					      prefetch_ios);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
					    prefetch_ios);
		/*
		 * lr_next_group did not advance past this group: the
		 * prefetch pass is complete.  Switch the request over to
		 * inode table zeroing if any group still needs it.
		 */
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	/* ITABLE mode: skip groups whose inode table is already zeroed. */
	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		start_time = ktime_get_real_ns();
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		/*
		 * First pass: derive the per-group reschedule delay from
		 * how long zeroing took, scaled by s_li_wait_mult.
		 */
		if (elr->lr_timeout == 0) {
			elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult);
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
3499
3500 /*
3501  * Remove lr_request from the list_request and free the
3502  * request structure. Should be called with li_list_mtx held
3503  */
3504 static void ext4_remove_li_request(struct ext4_li_request *elr)
3505 {
3506         if (!elr)
3507                 return;
3508
3509         list_del(&elr->lr_request);
3510         EXT4_SB(elr->lr_super)->s_li_request = NULL;
3511         kfree(elr);
3512 }
3513
3514 static void ext4_unregister_li_request(struct super_block *sb)
3515 {
3516         mutex_lock(&ext4_li_mtx);
3517         if (!ext4_li_info) {
3518                 mutex_unlock(&ext4_li_mtx);
3519                 return;
3520         }
3521
3522         mutex_lock(&ext4_li_info->li_list_mtx);
3523         ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3524         mutex_unlock(&ext4_li_info->li_list_mtx);
3525         mutex_unlock(&ext4_li_mtx);
3526 }
3527
/* kthread running ext4_lazyinit_thread(), started as "ext4lazyinit". */
static struct task_struct *ext4_lazyinit_task;
3529
3530 /*
3531  * This is the function where ext4lazyinit thread lives. It walks
3532  * through the request list searching for next scheduled filesystem.
3533  * When such a fs is found, run the lazy initialization request
3534  * (ext4_rn_li_request) and keep track of the time spend in this
3535  * function. Based on that time we compute next schedule time of
3536  * the request. When walking through the list is complete, compute
3537  * next waking time and put itself into sleep.
3538  */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);
	set_freezable();

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			/* Not due yet: just track the earliest deadline. */
			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					/* list may have changed while unlocked */
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				/* Couldn't take the locks; retry after a random delay. */
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}
3640
3641 static void ext4_clear_request_list(void)
3642 {
3643         struct list_head *pos, *n;
3644         struct ext4_li_request *elr;
3645
3646         mutex_lock(&ext4_li_info->li_list_mtx);
3647         list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3648                 elr = list_entry(pos, struct ext4_li_request,
3649                                  lr_request);
3650                 ext4_remove_li_request(elr);
3651         }
3652         mutex_unlock(&ext4_li_info->li_list_mtx);
3653 }
3654
3655 static int ext4_run_lazyinit_thread(void)
3656 {
3657         ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3658                                          ext4_li_info, "ext4lazyinit");
3659         if (IS_ERR(ext4_lazyinit_task)) {
3660                 int err = PTR_ERR(ext4_lazyinit_task);
3661                 ext4_clear_request_list();
3662                 kfree(ext4_li_info);
3663                 ext4_li_info = NULL;
3664                 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3665                                  "initialization thread\n",
3666                                  err);
3667                 return err;
3668         }
3669         ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3670         return 0;
3671 }
3672
3673 /*
3674  * Check whether it make sense to run itable init. thread or not.
3675  * If there is at least one uninitialized inode table, return
3676  * corresponding group number, else the loop goes through all
3677  * groups and return total number of groups.
3678  */
3679 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3680 {
3681         ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3682         struct ext4_group_desc *gdp = NULL;
3683
3684         if (!ext4_has_group_desc_csum(sb))
3685                 return ngroups;
3686
3687         for (group = 0; group < ngroups; group++) {
3688                 gdp = ext4_get_group_desc(sb, group, NULL);
3689                 if (!gdp)
3690                         continue;
3691
3692                 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3693                         break;
3694         }
3695
3696         return group;
3697 }
3698
3699 static int ext4_li_info_new(void)
3700 {
3701         struct ext4_lazy_init *eli = NULL;
3702
3703         eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3704         if (!eli)
3705                 return -ENOMEM;
3706
3707         INIT_LIST_HEAD(&eli->li_request_list);
3708         mutex_init(&eli->li_list_mtx);
3709
3710         eli->li_state |= EXT4_LAZYINIT_QUIT;
3711
3712         ext4_li_info = eli;
3713
3714         return 0;
3715 }
3716
3717 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3718                                             ext4_group_t start)
3719 {
3720         struct ext4_li_request *elr;
3721
3722         elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3723         if (!elr)
3724                 return NULL;
3725
3726         elr->lr_super = sb;
3727         elr->lr_first_not_zeroed = start;
3728         if (test_opt(sb, PREFETCH_BLOCK_BITMAPS))
3729                 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
3730         else {
3731                 elr->lr_mode = EXT4_LI_MODE_ITABLE;
3732                 elr->lr_next_group = start;
3733         }
3734
3735         /*
3736          * Randomize first schedule time of the request to
3737          * spread the inode table initialization requests
3738          * better.
3739          */
3740         elr->lr_next_sched = jiffies + (prandom_u32() %
3741                                 (EXT4_DEF_LI_MAX_START_DELAY * HZ));
3742         return elr;
3743 }
3744
/*
 * Register a lazy-initialization request for @sb, starting at group
 * @first_not_zeroed, and make sure the background lazyinit thread is
 * running.  Returns 0 on success or a negative errno.
 */
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	/*
	 * Skip registration when there is no itable work to do (all
	 * groups already zeroed, read-only mount, or init_itable
	 * disabled) — unless block bitmap prefetching was requested.
	 */
	if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) &&
	    (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	     !test_opt(sb, INIT_INODE_TABLE)))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	/* Lazily create the global lazyinit state on first use. */
	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	/* li_list_mtx nests inside ext4_li_mtx. */
	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		/* elr is only non-NULL if it was never queued above. */
		kfree(elr);
	return ret;
}
3803
3804 /*
3805  * We do not need to lock anything since this is called on
3806  * module unload.
3807  */
3808 static void ext4_destroy_lazyinit_thread(void)
3809 {
3810         /*
3811          * If thread exited earlier
3812          * there's nothing to be done.
3813          */
3814         if (!ext4_li_info || !ext4_lazyinit_task)
3815                 return;
3816
3817         kthread_stop(ext4_lazyinit_task);
3818 }
3819
3820 static int set_journal_csum_feature_set(struct super_block *sb)
3821 {
3822         int ret = 1;
3823         int compat, incompat;
3824         struct ext4_sb_info *sbi = EXT4_SB(sb);
3825
3826         if (ext4_has_metadata_csum(sb)) {
3827                 /* journal checksum v3 */
3828                 compat = 0;
3829                 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
3830         } else {
3831                 /* journal checksum v1 */
3832                 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
3833                 incompat = 0;
3834         }
3835
3836         jbd2_journal_clear_features(sbi->s_journal,
3837                         JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3838                         JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3839                         JBD2_FEATURE_INCOMPAT_CSUM_V2);
3840         if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
3841                 ret = jbd2_journal_set_features(sbi->s_journal,
3842                                 compat, 0,
3843                                 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3844                                 incompat);
3845         } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
3846                 ret = jbd2_journal_set_features(sbi->s_journal,
3847                                 compat, 0,
3848                                 incompat);
3849                 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3850                                 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3851         } else {
3852                 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3853                                 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3854         }
3855
3856         return ret;
3857 }
3858
3859 /*
3860  * Note: calculating the overhead so we can be compatible with
3861  * historical BSD practice is quite difficult in the face of
3862  * clusters/bigalloc.  This is because multiple metadata blocks from
3863  * different block group can end up in the same allocation cluster.
3864  * Calculating the exact overhead in the face of clustered allocation
3865  * requires either O(all block bitmaps) in memory or O(number of block
3866  * groups**2) in time.  We will still calculate the superblock for
3867  * older file systems --- and if we come across with a bigalloc file
3868  * system with zero in s_overhead_clusters the estimate will be close to
3869  * correct especially for very large cluster sizes --- but for newer
3870  * file systems, it's better to calculate this figure once at mkfs
3871  * time, and store it in the superblock.  If the superblock value is
3872  * present (even for non-bigalloc file systems), we will use it.
3873  */
/*
 * Count the metadata overhead of block group @grp, in clusters.
 * @buf is a zeroed, page-sized scratch bitmap (one bit per cluster of
 * the group) owned by the caller.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;
	int			has_super = ext4_bg_has_super(sb, grp);

	/*
	 * Without bigalloc the overhead has a simple closed form:
	 * superblock backup (if present), group descriptor blocks,
	 * reserved GDT blocks, the inode table, plus the block and
	 * inode bitmaps (the "+ 2").
	 */
	if (!ext4_has_feature_bigalloc(sb))
		return (has_super + ext4_bg_num_gdb(sb, grp) +
			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
			sbi->s_itb_per_group + 2);

	/*
	 * With bigalloc, metadata belonging to *any* group may fall
	 * inside this group's block range, so scan every group
	 * descriptor and mark in @buf each metadata block that lands
	 * in [first_block, last_block].
	 */
	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		/* only count the itable if it lies entirely in range */
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		/* Group @grp itself: its sb backup and GDT blocks. */
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			/* corrupted descriptor count; clamp to group size */
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	/*
	 * Overhead in clusters = clusters per group minus the free
	 * (unset) bits remaining in the scratch bitmap.
	 */
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
3933
/*
 * Compute the total filesystem metadata overhead (in clusters) and
 * stash it in sbi->s_overhead.  Returns 0 on success or -ENOMEM.
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	/* scratch cluster bitmap shared across count_overhead() calls */
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		/* re-zero the scratch bitmap only if it was dirtied */
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	/* publish s_overhead before lockless readers observe it */
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}
3996
3997 static void ext4_set_resv_clusters(struct super_block *sb)
3998 {
3999         ext4_fsblk_t resv_clusters;
4000         struct ext4_sb_info *sbi = EXT4_SB(sb);
4001
4002         /*
4003          * There's no need to reserve anything when we aren't using extents.
4004          * The space estimates are exact, there are no unwritten extents,
4005          * hole punching doesn't need new metadata... This is needed especially
4006          * to keep ext2/3 backward compatibility.
4007          */
4008         if (!ext4_has_feature_extents(sb))
4009                 return;
4010         /*
4011          * By default we reserve 2% or 4096 clusters, whichever is smaller.
4012          * This should cover the situations where we can not afford to run
4013          * out of space like for example punch hole, or converting
4014          * unwritten extents in delalloc path. In most cases such
4015          * allocation would require 1, or 2 blocks, higher numbers are
4016          * very rare.
4017          */
4018         resv_clusters = (ext4_blocks_count(sbi->s_es) >>
4019                          sbi->s_cluster_bits);
4020
4021         do_div(resv_clusters, 50);
4022         resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
4023
4024         atomic64_set(&sbi->s_resv_clusters, resv_clusters);
4025 }
4026
4027 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4028 {
4029         struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
4030         char *orig_data = kstrdup(data, GFP_KERNEL);
4031         struct buffer_head *bh, **group_desc;
4032         struct ext4_super_block *es = NULL;
4033         struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
4034         struct flex_groups **flex_groups;
4035         ext4_fsblk_t block;
4036         ext4_fsblk_t sb_block = get_sb_block(&data);
4037         ext4_fsblk_t logical_sb_block;
4038         unsigned long offset = 0;
4039         unsigned long journal_devnum = 0;
4040         unsigned long def_mount_opts;
4041         struct inode *root;
4042         const char *descr;
4043         int ret = -ENOMEM;
4044         int blocksize, clustersize;
4045         unsigned int db_count;
4046         unsigned int i;
4047         int needs_recovery, has_huge_files;
4048         __u64 blocks_count;
4049         int err = 0;
4050         unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
4051         ext4_group_t first_not_zeroed;
4052
4053         if ((data && !orig_data) || !sbi)
4054                 goto out_free_base;
4055
4056         sbi->s_daxdev = dax_dev;
4057         sbi->s_blockgroup_lock =
4058                 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
4059         if (!sbi->s_blockgroup_lock)
4060                 goto out_free_base;
4061
4062         sb->s_fs_info = sbi;
4063         sbi->s_sb = sb;
4064         sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
4065         sbi->s_sb_block = sb_block;
4066         if (sb->s_bdev->bd_part)
4067                 sbi->s_sectors_written_start =
4068                         part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
4069
4070         /* Cleanup superblock name */
4071         strreplace(sb->s_id, '/', '!');
4072
4073         /* -EINVAL is default */
4074         ret = -EINVAL;
4075         blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
4076         if (!blocksize) {
4077                 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
4078                 goto out_fail;
4079         }
4080
4081         /*
4082          * The ext4 superblock will not be buffer aligned for other than 1kB
4083          * block sizes.  We need to calculate the offset from buffer start.
4084          */
4085         if (blocksize != EXT4_MIN_BLOCK_SIZE) {
4086                 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
4087                 offset = do_div(logical_sb_block, blocksize);
4088         } else {
4089                 logical_sb_block = sb_block;
4090         }
4091
4092         bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
4093         if (IS_ERR(bh)) {
4094                 ext4_msg(sb, KERN_ERR, "unable to read superblock");
4095                 ret = PTR_ERR(bh);
4096                 bh = NULL;
4097                 goto out_fail;
4098         }
4099         /*
4100          * Note: s_es must be initialized as soon as possible because
4101          *       some ext4 macro-instructions depend on its value
4102          */
4103         es = (struct ext4_super_block *) (bh->b_data + offset);
4104         sbi->s_es = es;
4105         sb->s_magic = le16_to_cpu(es->s_magic);
4106         if (sb->s_magic != EXT4_SUPER_MAGIC)
4107                 goto cantfind_ext4;
4108         sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
4109
4110         /* Warn if metadata_csum and gdt_csum are both set. */
4111         if (ext4_has_feature_metadata_csum(sb) &&
4112             ext4_has_feature_gdt_csum(sb))
4113                 ext4_warning(sb, "metadata_csum and uninit_bg are "
4114                              "redundant flags; please run fsck.");
4115
4116         /* Check for a known checksum algorithm */
4117         if (!ext4_verify_csum_type(sb, es)) {
4118                 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4119                          "unknown checksum algorithm.");
4120                 silent = 1;
4121                 goto cantfind_ext4;
4122         }
4123
4124         /* Load the checksum driver */
4125         sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
4126         if (IS_ERR(sbi->s_chksum_driver)) {
4127                 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
4128                 ret = PTR_ERR(sbi->s_chksum_driver);
4129                 sbi->s_chksum_driver = NULL;
4130                 goto failed_mount;
4131         }
4132
4133         /* Check superblock checksum */
4134         if (!ext4_superblock_csum_verify(sb, es)) {
4135                 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4136                          "invalid superblock checksum.  Run e2fsck?");
4137                 silent = 1;
4138                 ret = -EFSBADCRC;
4139                 goto cantfind_ext4;
4140         }
4141
4142         /* Precompute checksum seed for all metadata */
4143         if (ext4_has_feature_csum_seed(sb))
4144                 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
4145         else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
4146                 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
4147                                                sizeof(es->s_uuid));
4148
4149         /* Set defaults before we parse the mount options */
4150         def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
4151         set_opt(sb, INIT_INODE_TABLE);
4152         if (def_mount_opts & EXT4_DEFM_DEBUG)
4153                 set_opt(sb, DEBUG);
4154         if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
4155                 set_opt(sb, GRPID);
4156         if (def_mount_opts & EXT4_DEFM_UID16)
4157                 set_opt(sb, NO_UID32);
4158         /* xattr user namespace & acls are now defaulted on */
4159         set_opt(sb, XATTR_USER);
4160 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4161         set_opt(sb, POSIX_ACL);
4162 #endif
4163         if (ext4_has_feature_fast_commit(sb))
4164                 set_opt2(sb, JOURNAL_FAST_COMMIT);
4165         /* don't forget to enable journal_csum when metadata_csum is enabled. */
4166         if (ext4_has_metadata_csum(sb))
4167                 set_opt(sb, JOURNAL_CHECKSUM);
4168
4169         if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4170                 set_opt(sb, JOURNAL_DATA);
4171         else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4172                 set_opt(sb, ORDERED_DATA);
4173         else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4174                 set_opt(sb, WRITEBACK_DATA);
4175
4176         if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
4177                 set_opt(sb, ERRORS_PANIC);
4178         else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
4179                 set_opt(sb, ERRORS_CONT);
4180         else
4181                 set_opt(sb, ERRORS_RO);
4182         /* block_validity enabled by default; disable with noblock_validity */
4183         set_opt(sb, BLOCK_VALIDITY);
4184         if (def_mount_opts & EXT4_DEFM_DISCARD)
4185                 set_opt(sb, DISCARD);
4186
4187         sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
4188         sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
4189         sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
4190         sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
4191         sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
4192
4193         if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4194                 set_opt(sb, BARRIER);
4195
4196         /*
4197          * enable delayed allocation by default
4198          * Use -o nodelalloc to turn it off
4199          */
4200         if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4201             ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4202                 set_opt(sb, DELALLOC);
4203
4204         /*
4205          * set default s_li_wait_mult for lazyinit, for the case there is
4206          * no mount option specified.
4207          */
4208         sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
4209
4210         if (le32_to_cpu(es->s_log_block_size) >
4211             (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4212                 ext4_msg(sb, KERN_ERR,
4213                          "Invalid log block size: %u",
4214                          le32_to_cpu(es->s_log_block_size));
4215                 goto failed_mount;
4216         }
4217         if (le32_to_cpu(es->s_log_cluster_size) >
4218             (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4219                 ext4_msg(sb, KERN_ERR,
4220                          "Invalid log cluster size: %u",
4221                          le32_to_cpu(es->s_log_cluster_size));
4222                 goto failed_mount;
4223         }
4224
4225         blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
4226
4227         if (blocksize == PAGE_SIZE)
4228                 set_opt(sb, DIOREAD_NOLOCK);
4229
4230         if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
4231                 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
4232                 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
4233         } else {
4234                 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
4235                 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
4236                 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
4237                         ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
4238                                  sbi->s_first_ino);
4239                         goto failed_mount;
4240                 }
4241                 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
4242                     (!is_power_of_2(sbi->s_inode_size)) ||
4243                     (sbi->s_inode_size > blocksize)) {
4244                         ext4_msg(sb, KERN_ERR,
4245                                "unsupported inode size: %d",
4246                                sbi->s_inode_size);
4247                         ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
4248                         goto failed_mount;
4249                 }
4250                 /*
4251                  * i_atime_extra is the last extra field available for
4252                  * [acm]times in struct ext4_inode. Checking for that
4253                  * field should suffice to ensure we have extra space
4254                  * for all three.
4255                  */
4256                 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
4257                         sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
4258                         sb->s_time_gran = 1;
4259                         sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
4260                 } else {
4261                         sb->s_time_gran = NSEC_PER_SEC;
4262                         sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
4263                 }
4264                 sb->s_time_min = EXT4_TIMESTAMP_MIN;
4265         }
4266         if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
4267                 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4268                         EXT4_GOOD_OLD_INODE_SIZE;
4269                 if (ext4_has_feature_extra_isize(sb)) {
4270                         unsigned v, max = (sbi->s_inode_size -
4271                                            EXT4_GOOD_OLD_INODE_SIZE);
4272
4273                         v = le16_to_cpu(es->s_want_extra_isize);
4274                         if (v > max) {
4275                                 ext4_msg(sb, KERN_ERR,
4276                                          "bad s_want_extra_isize: %d", v);
4277                                 goto failed_mount;
4278                         }
4279                         if (sbi->s_want_extra_isize < v)
4280                                 sbi->s_want_extra_isize = v;
4281
4282                         v = le16_to_cpu(es->s_min_extra_isize);
4283                         if (v > max) {
4284                                 ext4_msg(sb, KERN_ERR,
4285                                          "bad s_min_extra_isize: %d", v);
4286                                 goto failed_mount;
4287                         }
4288                         if (sbi->s_want_extra_isize < v)
4289                                 sbi->s_want_extra_isize = v;
4290                 }
4291         }
4292
4293         if (sbi->s_es->s_mount_opts[0]) {
4294                 char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
4295                                               sizeof(sbi->s_es->s_mount_opts),
4296                                               GFP_KERNEL);
4297                 if (!s_mount_opts)
4298                         goto failed_mount;
4299                 if (!parse_options(s_mount_opts, sb, &journal_devnum,
4300                                    &journal_ioprio, 0)) {
4301                         ext4_msg(sb, KERN_WARNING,
4302                                  "failed to parse options in superblock: %s",
4303                                  s_mount_opts);
4304                 }
4305                 kfree(s_mount_opts);
4306         }
4307         sbi->s_def_mount_opt = sbi->s_mount_opt;
4308         if (!parse_options((char *) data, sb, &journal_devnum,
4309                            &journal_ioprio, 0))
4310                 goto failed_mount;
4311
4312 #ifdef CONFIG_UNICODE
4313         if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
4314                 const struct ext4_sb_encodings *encoding_info;
4315                 struct unicode_map *encoding;
4316                 __u16 encoding_flags;
4317
4318                 if (ext4_has_feature_encrypt(sb)) {
4319                         ext4_msg(sb, KERN_ERR,
4320                                  "Can't mount with encoding and encryption");
4321                         goto failed_mount;
4322                 }
4323
4324                 if (ext4_sb_read_encoding(es, &encoding_info,
4325                                           &encoding_flags)) {
4326                         ext4_msg(sb, KERN_ERR,
4327                                  "Encoding requested by superblock is unknown");
4328                         goto failed_mount;
4329                 }
4330
4331                 encoding = utf8_load(encoding_info->version);
4332                 if (IS_ERR(encoding)) {
4333                         ext4_msg(sb, KERN_ERR,
4334                                  "can't mount with superblock charset: %s-%s "
4335                                  "not supported by the kernel. flags: 0x%x.",
4336                                  encoding_info->name, encoding_info->version,
4337                                  encoding_flags);
4338                         goto failed_mount;
4339                 }
4340                 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
4341                          "%s-%s with flags 0x%hx", encoding_info->name,
4342                          encoding_info->version?:"\b", encoding_flags);
4343
4344                 sb->s_encoding = encoding;
4345                 sb->s_encoding_flags = encoding_flags;
4346         }
4347 #endif
4348
4349         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4350                 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
4351                 /* can't mount with both data=journal and dioread_nolock. */
4352                 clear_opt(sb, DIOREAD_NOLOCK);
4353                 clear_opt2(sb, JOURNAL_FAST_COMMIT);
4354                 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4355                         ext4_msg(sb, KERN_ERR, "can't mount with "
4356                                  "both data=journal and delalloc");
4357                         goto failed_mount;
4358                 }
4359                 if (test_opt(sb, DAX_ALWAYS)) {
4360                         ext4_msg(sb, KERN_ERR, "can't mount with "
4361                                  "both data=journal and dax");
4362                         goto failed_mount;
4363                 }
4364                 if (ext4_has_feature_encrypt(sb)) {
4365                         ext4_msg(sb, KERN_WARNING,
4366                                  "encrypted files will use data=ordered "
4367                                  "instead of data journaling mode");
4368                 }
4369                 if (test_opt(sb, DELALLOC))
4370                         clear_opt(sb, DELALLOC);
4371         } else {
4372                 sb->s_iflags |= SB_I_CGROUPWB;
4373         }
4374
4375         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4376                 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
4377
4378         if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4379             (ext4_has_compat_features(sb) ||
4380              ext4_has_ro_compat_features(sb) ||
4381              ext4_has_incompat_features(sb)))
4382                 ext4_msg(sb, KERN_WARNING,
4383                        "feature flags set on rev 0 fs, "
4384                        "running e2fsck is recommended");
4385
4386         if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
4387                 set_opt2(sb, HURD_COMPAT);
4388                 if (ext4_has_feature_64bit(sb)) {
4389                         ext4_msg(sb, KERN_ERR,
4390                                  "The Hurd can't support 64-bit file systems");
4391                         goto failed_mount;
4392                 }
4393
4394                 /*
4395                  * ea_inode feature uses l_i_version field which is not
4396                  * available in HURD_COMPAT mode.
4397                  */
4398                 if (ext4_has_feature_ea_inode(sb)) {
4399                         ext4_msg(sb, KERN_ERR,
4400                                  "ea_inode feature is not supported for Hurd");
4401                         goto failed_mount;
4402                 }
4403         }
4404
4405         if (IS_EXT2_SB(sb)) {
4406                 if (ext2_feature_set_ok(sb))
4407                         ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
4408                                  "using the ext4 subsystem");
4409                 else {
4410                         /*
4411                          * If we're probing be silent, if this looks like
4412                          * it's actually an ext[34] filesystem.
4413                          */
4414                         if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4415                                 goto failed_mount;
4416                         ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4417                                  "to feature incompatibilities");
4418                         goto failed_mount;
4419                 }
4420         }
4421
4422         if (IS_EXT3_SB(sb)) {
4423                 if (ext3_feature_set_ok(sb))
4424                         ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4425                                  "using the ext4 subsystem");
4426                 else {
4427                         /*
4428                          * If we're probing be silent, if this looks like
4429                          * it's actually an ext4 filesystem.
4430                          */
4431                         if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4432                                 goto failed_mount;
4433                         ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4434                                  "to feature incompatibilities");
4435                         goto failed_mount;
4436                 }
4437         }
4438
4439         /*
4440          * Check feature flags regardless of the revision level, since we
4441          * previously didn't change the revision level when setting the flags,
4442          * so there is a chance incompat flags are set on a rev 0 filesystem.
4443          */
4444         if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4445                 goto failed_mount;
4446
4447         if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
4448                 ext4_msg(sb, KERN_ERR,
4449                          "Number of reserved GDT blocks insanely large: %d",
4450                          le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4451                 goto failed_mount;
4452         }
4453
4454         if (bdev_dax_supported(sb->s_bdev, blocksize))
4455                 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
4456
4457         if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4458                 if (ext4_has_feature_inline_data(sb)) {
4459                         ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4460                                         " that may contain inline data");
4461                         goto failed_mount;
4462                 }
4463                 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4464                         ext4_msg(sb, KERN_ERR,
4465                                 "DAX unsupported by block device.");
4466                         goto failed_mount;
4467                 }
4468         }
4469
4470         if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4471                 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4472                          es->s_encryption_level);
4473                 goto failed_mount;
4474         }
4475
4476         if (sb->s_blocksize != blocksize) {
4477                 /*
4478                  * bh must be released before kill_bdev(), otherwise
4479                  * it won't be freed and its page also. kill_bdev()
4480                  * is called by sb_set_blocksize().
4481                  */
4482                 brelse(bh);
4483                 /* Validate the filesystem blocksize */
4484                 if (!sb_set_blocksize(sb, blocksize)) {
4485                         ext4_msg(sb, KERN_ERR, "bad block size %d",
4486                                         blocksize);
4487                         bh = NULL;
4488                         goto failed_mount;
4489                 }
4490
4491                 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
4492                 offset = do_div(logical_sb_block, blocksize);
4493                 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
4494                 if (IS_ERR(bh)) {
4495                         ext4_msg(sb, KERN_ERR,
4496                                "Can't read superblock on 2nd try");
4497                         ret = PTR_ERR(bh);
4498                         bh = NULL;
4499                         goto failed_mount;
4500                 }
4501                 es = (struct ext4_super_block *)(bh->b_data + offset);
4502                 sbi->s_es = es;
4503                 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
4504                         ext4_msg(sb, KERN_ERR,
4505                                "Magic mismatch, very weird!");
4506                         goto failed_mount;
4507                 }
4508         }
4509
4510         has_huge_files = ext4_has_feature_huge_file(sb);
4511         sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
4512                                                       has_huge_files);
4513         sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
4514
4515         sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
4516         if (ext4_has_feature_64bit(sb)) {
4517                 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
4518                     sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
4519                     !is_power_of_2(sbi->s_desc_size)) {
4520                         ext4_msg(sb, KERN_ERR,
4521                                "unsupported descriptor size %lu",
4522                                sbi->s_desc_size);
4523                         goto failed_mount;
4524                 }
4525         } else
4526                 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
4527
4528         sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
4529         sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
4530
4531         sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
4532         if (sbi->s_inodes_per_block == 0)
4533                 goto cantfind_ext4;
4534         if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
4535             sbi->s_inodes_per_group > blocksize * 8) {
4536                 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
4537                          sbi->s_inodes_per_group);
4538                 goto failed_mount;
4539         }
4540         sbi->s_itb_per_group = sbi->s_inodes_per_group /
4541                                         sbi->s_inodes_per_block;
4542         sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
4543         sbi->s_sbh = bh;
4544         sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
4545         sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
4546         sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
4547
4548         for (i = 0; i < 4; i++)
4549                 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
4550         sbi->s_def_hash_version = es->s_def_hash_version;
4551         if (ext4_has_feature_dir_index(sb)) {
4552                 i = le32_to_cpu(es->s_flags);
4553                 if (i & EXT2_FLAGS_UNSIGNED_HASH)
4554                         sbi->s_hash_unsigned = 3;
4555                 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
4556 #ifdef __CHAR_UNSIGNED__
4557                         if (!sb_rdonly(sb))
4558                                 es->s_flags |=
4559                                         cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
4560                         sbi->s_hash_unsigned = 3;
4561 #else
4562                         if (!sb_rdonly(sb))
4563                                 es->s_flags |=
4564                                         cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
4565 #endif
4566                 }
4567         }
4568
4569         /* Handle clustersize */
4570         clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4571         if (ext4_has_feature_bigalloc(sb)) {
4572                 if (clustersize < blocksize) {
4573                         ext4_msg(sb, KERN_ERR,
4574                                  "cluster size (%d) smaller than "
4575                                  "block size (%d)", clustersize, blocksize);
4576                         goto failed_mount;
4577                 }
4578                 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4579                         le32_to_cpu(es->s_log_block_size);
4580                 sbi->s_clusters_per_group =
4581                         le32_to_cpu(es->s_clusters_per_group);
4582                 if (sbi->s_clusters_per_group > blocksize * 8) {
4583                         ext4_msg(sb, KERN_ERR,
4584                                  "#clusters per group too big: %lu",
4585                                  sbi->s_clusters_per_group);
4586                         goto failed_mount;
4587                 }
4588                 if (sbi->s_blocks_per_group !=
4589                     (sbi->s_clusters_per_group * (clustersize / blocksize))) {
4590                         ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
4591                                  "clusters per group (%lu) inconsistent",
4592                                  sbi->s_blocks_per_group,
4593                                  sbi->s_clusters_per_group);
4594                         goto failed_mount;
4595                 }
4596         } else {
4597                 if (clustersize != blocksize) {
4598                         ext4_msg(sb, KERN_ERR,
4599                                  "fragment/cluster size (%d) != "
4600                                  "block size (%d)", clustersize, blocksize);
4601                         goto failed_mount;
4602                 }
4603                 if (sbi->s_blocks_per_group > blocksize * 8) {
4604                         ext4_msg(sb, KERN_ERR,
4605                                  "#blocks per group too big: %lu",
4606                                  sbi->s_blocks_per_group);
4607                         goto failed_mount;
4608                 }
4609                 sbi->s_clusters_per_group = sbi->s_blocks_per_group;
4610                 sbi->s_cluster_bits = 0;
4611         }
4612         sbi->s_cluster_ratio = clustersize / blocksize;
4613
4614         /* Do we have standard group size of clustersize * 8 blocks ? */
4615         if (sbi->s_blocks_per_group == clustersize << 3)
4616                 set_opt2(sb, STD_GROUP_SIZE);
4617
4618         /*
4619          * Test whether we have more sectors than will fit in sector_t,
4620          * and whether the max offset is addressable by the page cache.
4621          */
4622         err = generic_check_addressable(sb->s_blocksize_bits,
4623                                         ext4_blocks_count(es));
4624         if (err) {
4625                 ext4_msg(sb, KERN_ERR, "filesystem"
4626                          " too large to mount safely on this system");
4627                 goto failed_mount;
4628         }
4629
4630         if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
4631                 goto cantfind_ext4;
4632
4633         /* check blocks count against device size */
4634         blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
4635         if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4636                 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4637                        "exceeds size of device (%llu blocks)",
4638                        ext4_blocks_count(es), blocks_count);
4639                 goto failed_mount;
4640         }
4641
4642         /*
4643          * It makes no sense for the first data block to be beyond the end
4644          * of the filesystem.
4645          */
4646         if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4647                 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4648                          "block %u is beyond end of filesystem (%llu)",
4649                          le32_to_cpu(es->s_first_data_block),
4650                          ext4_blocks_count(es));
4651                 goto failed_mount;
4652         }
4653         if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4654             (sbi->s_cluster_ratio == 1)) {
4655                 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4656                          "block is 0 with a 1k block and cluster size");
4657                 goto failed_mount;
4658         }
4659
4660         blocks_count = (ext4_blocks_count(es) -
4661                         le32_to_cpu(es->s_first_data_block) +
4662                         EXT4_BLOCKS_PER_GROUP(sb) - 1);
4663         do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
4664         if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4665                 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4666                        "(block count %llu, first data block %u, "
4667                        "blocks per group %lu)", blocks_count,
4668                        ext4_blocks_count(es),
4669                        le32_to_cpu(es->s_first_data_block),
4670                        EXT4_BLOCKS_PER_GROUP(sb));
4671                 goto failed_mount;
4672         }
4673         sbi->s_groups_count = blocks_count;
4674         sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
4675                         (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4676         if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4677             le32_to_cpu(es->s_inodes_count)) {
4678                 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4679                          le32_to_cpu(es->s_inodes_count),
4680                          ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4681                 ret = -EINVAL;
4682                 goto failed_mount;
4683         }
4684         db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4685                    EXT4_DESC_PER_BLOCK(sb);
4686         if (ext4_has_feature_meta_bg(sb)) {
4687                 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4688                         ext4_msg(sb, KERN_WARNING,
4689                                  "first meta block group too large: %u "
4690                                  "(group descriptor block count %u)",
4691                                  le32_to_cpu(es->s_first_meta_bg), db_count);
4692                         goto failed_mount;
4693                 }
4694         }
4695         rcu_assign_pointer(sbi->s_group_desc,
4696                            kvmalloc_array(db_count,
4697                                           sizeof(struct buffer_head *),
4698                                           GFP_KERNEL));
4699         if (sbi->s_group_desc == NULL) {
4700                 ext4_msg(sb, KERN_ERR, "not enough memory");
4701                 ret = -ENOMEM;
4702                 goto failed_mount;
4703         }
4704
4705         bgl_lock_init(sbi->s_blockgroup_lock);
4706
4707         /* Pre-read the descriptors into the buffer cache */
4708         for (i = 0; i < db_count; i++) {
4709                 block = descriptor_loc(sb, logical_sb_block, i);
4710                 ext4_sb_breadahead_unmovable(sb, block);
4711         }
4712
4713         for (i = 0; i < db_count; i++) {
4714                 struct buffer_head *bh;
4715
4716                 block = descriptor_loc(sb, logical_sb_block, i);
4717                 bh = ext4_sb_bread_unmovable(sb, block);
4718                 if (IS_ERR(bh)) {
4719                         ext4_msg(sb, KERN_ERR,
4720                                "can't read group descriptor %d", i);
4721                         db_count = i;
4722                         ret = PTR_ERR(bh);
4723                         bh = NULL;
4724                         goto failed_mount2;
4725                 }
4726                 rcu_read_lock();
4727                 rcu_dereference(sbi->s_group_desc)[i] = bh;
4728                 rcu_read_unlock();
4729         }
4730         sbi->s_gdb_count = db_count;
4731         if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4732                 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4733                 ret = -EFSCORRUPTED;
4734                 goto failed_mount2;
4735         }
4736
4737         timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
4738
4739         /* Register extent status tree shrinker */
4740         if (ext4_es_register_shrinker(sbi))
4741                 goto failed_mount3;
4742
4743         sbi->s_stripe = ext4_get_stripe_size(sbi);
4744         sbi->s_extent_max_zeroout_kb = 32;
4745
4746         /*
4747          * set up enough so that it can read an inode
4748          */
4749         sb->s_op = &ext4_sops;
4750         sb->s_export_op = &ext4_export_ops;
4751         sb->s_xattr = ext4_xattr_handlers;
4752 #ifdef CONFIG_FS_ENCRYPTION
4753         sb->s_cop = &ext4_cryptops;
4754 #endif
4755 #ifdef CONFIG_FS_VERITY
4756         sb->s_vop = &ext4_verityops;
4757 #endif
4758 #ifdef CONFIG_QUOTA
4759         sb->dq_op = &ext4_quota_operations;
4760         if (ext4_has_feature_quota(sb))
4761                 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4762         else
4763                 sb->s_qcop = &ext4_qctl_operations;
4764         sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4765 #endif
4766         memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
4767
4768         INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
4769         mutex_init(&sbi->s_orphan_lock);
4770
4771         /* Initialize fast commit stuff */
4772         atomic_set(&sbi->s_fc_subtid, 0);
4773         atomic_set(&sbi->s_fc_ineligible_updates, 0);
4774         INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
4775         INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
4776         INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
4777         INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
4778         sbi->s_fc_bytes = 0;
4779         ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
4780         ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
4781         spin_lock_init(&sbi->s_fc_lock);
4782         memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4783         sbi->s_fc_replay_state.fc_regions = NULL;
4784         sbi->s_fc_replay_state.fc_regions_size = 0;
4785         sbi->s_fc_replay_state.fc_regions_used = 0;
4786         sbi->s_fc_replay_state.fc_regions_valid = 0;
4787         sbi->s_fc_replay_state.fc_modified_inodes = NULL;
4788         sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
4789         sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4790
4791         sb->s_root = NULL;
4792
4793         needs_recovery = (es->s_last_orphan != 0 ||
4794                           ext4_has_feature_journal_needs_recovery(sb));
4795
4796         if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
4797                 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
4798                         goto failed_mount3a;
4799
4800         /*
4801          * The first inode we look at is the journal inode.  Don't try
4802          * root first: it may be modified in the journal!
4803          */
4804         if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
4805                 err = ext4_load_journal(sb, es, journal_devnum);
4806                 if (err)
4807                         goto failed_mount3a;
4808         } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
4809                    ext4_has_feature_journal_needs_recovery(sb)) {
4810                 ext4_msg(sb, KERN_ERR, "required journal recovery "
4811                        "suppressed and not mounted read-only");
4812                 goto failed_mount_wq;
4813         } else {
4814                 /* Nojournal mode, all journal mount options are illegal */
4815                 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
4816                         ext4_msg(sb, KERN_ERR, "can't mount with "
4817                                  "journal_checksum, fs mounted w/o journal");
4818                         goto failed_mount_wq;
4819                 }
4820                 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4821                         ext4_msg(sb, KERN_ERR, "can't mount with "
4822                                  "journal_async_commit, fs mounted w/o journal");
4823                         goto failed_mount_wq;
4824                 }
4825                 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
4826                         ext4_msg(sb, KERN_ERR, "can't mount with "
4827                                  "commit=%lu, fs mounted w/o journal",
4828                                  sbi->s_commit_interval / HZ);
4829                         goto failed_mount_wq;
4830                 }
4831                 if (EXT4_MOUNT_DATA_FLAGS &
4832                     (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
4833                         ext4_msg(sb, KERN_ERR, "can't mount with "
4834                                  "data=, fs mounted w/o journal");
4835                         goto failed_mount_wq;
4836                 }
4837                 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
4838                 clear_opt(sb, JOURNAL_CHECKSUM);
4839                 clear_opt(sb, DATA_FLAGS);
4840                 clear_opt2(sb, JOURNAL_FAST_COMMIT);
4841                 sbi->s_journal = NULL;
4842                 needs_recovery = 0;
4843                 goto no_journal;
4844         }
4845
4846         if (ext4_has_feature_64bit(sb) &&
4847             !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4848                                        JBD2_FEATURE_INCOMPAT_64BIT)) {
4849                 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4850                 goto failed_mount_wq;
4851         }
4852
4853         if (!set_journal_csum_feature_set(sb)) {
4854                 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4855                          "feature set");
4856                 goto failed_mount_wq;
4857         }
4858
4859         if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
4860                 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4861                                           JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
4862                 ext4_msg(sb, KERN_ERR,
4863                         "Failed to set fast commit journal feature");
4864                 goto failed_mount_wq;
4865         }
4866
4867         /* We have now updated the journal if required, so we can
4868          * validate the data journaling mode. */
4869         switch (test_opt(sb, DATA_FLAGS)) {
4870         case 0:
4871                 /* No mode set, assume a default based on the journal
4872                  * capabilities: ORDERED_DATA if the journal can
4873                  * cope, else JOURNAL_DATA
4874                  */
4875                 if (jbd2_journal_check_available_features
4876                     (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4877                         set_opt(sb, ORDERED_DATA);
4878                         sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4879                 } else {
4880                         set_opt(sb, JOURNAL_DATA);
4881                         sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4882                 }
4883                 break;
4884
4885         case EXT4_MOUNT_ORDERED_DATA:
4886         case EXT4_MOUNT_WRITEBACK_DATA:
4887                 if (!jbd2_journal_check_available_features
4888                     (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4889                         ext4_msg(sb, KERN_ERR, "Journal does not support "
4890                                "requested data journaling mode");
4891                         goto failed_mount_wq;
4892                 }
4893         default:
4894                 break;
4895         }
4896
4897         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4898             test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4899                 ext4_msg(sb, KERN_ERR, "can't mount with "
4900                         "journal_async_commit in data=ordered mode");
4901                 goto failed_mount_wq;
4902         }
4903
4904         set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4905
4906         sbi->s_journal->j_submit_inode_data_buffers =
4907                 ext4_journal_submit_inode_data_buffers;
4908         sbi->s_journal->j_finish_inode_data_buffers =
4909                 ext4_journal_finish_inode_data_buffers;
4910
4911 no_journal:
4912         if (!test_opt(sb, NO_MBCACHE)) {
4913                 sbi->s_ea_block_cache = ext4_xattr_create_cache();
4914                 if (!sbi->s_ea_block_cache) {
4915                         ext4_msg(sb, KERN_ERR,
4916                                  "Failed to create ea_block_cache");
4917                         goto failed_mount_wq;
4918                 }
4919
4920                 if (ext4_has_feature_ea_inode(sb)) {
4921                         sbi->s_ea_inode_cache = ext4_xattr_create_cache();
4922                         if (!sbi->s_ea_inode_cache) {
4923                                 ext4_msg(sb, KERN_ERR,
4924                                          "Failed to create ea_inode_cache");
4925                                 goto failed_mount_wq;
4926                         }
4927                 }
4928         }
4929
4930         if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
4931                 ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
4932                 goto failed_mount_wq;
4933         }
4934
4935         /*
4936          * Get the # of file system overhead blocks from the
4937          * superblock if present.
4938          */
4939         sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
4940         /* ignore the precalculated value if it is ridiculous */
4941         if (sbi->s_overhead > ext4_blocks_count(es))
4942                 sbi->s_overhead = 0;
4943         /*
4944          * If the bigalloc feature is not enabled recalculating the
4945          * overhead doesn't take long, so we might as well just redo
4946          * it to make sure we are using the correct value.
4947          */
4948         if (!ext4_has_feature_bigalloc(sb))
4949                 sbi->s_overhead = 0;
4950         if (sbi->s_overhead == 0) {
4951                 err = ext4_calculate_overhead(sb);
4952                 if (err)
4953                         goto failed_mount_wq;
4954         }
4955
4956         /*
4957          * The maximum number of concurrent works can be high and
4958          * concurrency isn't really necessary.  Limit it to 1.
4959          */
4960         EXT4_SB(sb)->rsv_conversion_wq =
4961                 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4962         if (!EXT4_SB(sb)->rsv_conversion_wq) {
4963                 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
4964                 ret = -ENOMEM;
4965                 goto failed_mount4;
4966         }
4967
4968         /*
4969          * The jbd2_journal_load will have done any necessary log recovery,
4970          * so we can safely mount the rest of the filesystem now.
4971          */
4972
4973         root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4974         if (IS_ERR(root)) {
4975                 ext4_msg(sb, KERN_ERR, "get root inode failed");
4976                 ret = PTR_ERR(root);
4977                 root = NULL;
4978                 goto failed_mount4;
4979         }
4980         if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4981                 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
4982                 iput(root);
4983                 goto failed_mount4;
4984         }
4985
4986 #ifdef CONFIG_UNICODE
4987         if (sb->s_encoding)
4988                 sb->s_d_op = &ext4_dentry_ops;
4989 #endif
4990
4991         sb->s_root = d_make_root(root);
4992         if (!sb->s_root) {
4993                 ext4_msg(sb, KERN_ERR, "get root dentry failed");
4994                 ret = -ENOMEM;
4995                 goto failed_mount4;
4996         }
4997
4998         ret = ext4_setup_super(sb, es, sb_rdonly(sb));
4999         if (ret == -EROFS) {
5000                 sb->s_flags |= SB_RDONLY;
5001                 ret = 0;
5002         } else if (ret)
5003                 goto failed_mount4a;
5004
5005         ext4_set_resv_clusters(sb);
5006
5007         if (test_opt(sb, BLOCK_VALIDITY)) {
5008                 err = ext4_setup_system_zone(sb);
5009                 if (err) {
5010                         ext4_msg(sb, KERN_ERR, "failed to initialize system "
5011                                  "zone (%d)", err);
5012                         goto failed_mount4a;
5013                 }
5014         }
5015         ext4_fc_replay_cleanup(sb);
5016
5017         ext4_ext_init(sb);
5018         err = ext4_mb_init(sb);
5019         if (err) {
5020                 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
5021                          err);
5022                 goto failed_mount5;
5023         }
5024
5025         /*
5026          * We can only set up the journal commit callback once
5027          * mballoc is initialized
5028          */
5029         if (sbi->s_journal)
5030                 sbi->s_journal->j_commit_callback =
5031                         ext4_journal_commit_callback;
5032
5033         block = ext4_count_free_clusters(sb);
5034         ext4_free_blocks_count_set(sbi->s_es, 
5035                                    EXT4_C2B(sbi, block));
5036         ext4_superblock_csum_set(sb);
5037         err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
5038                                   GFP_KERNEL);
5039         if (!err) {
5040                 unsigned long freei = ext4_count_free_inodes(sb);
5041                 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
5042                 ext4_superblock_csum_set(sb);
5043                 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
5044                                           GFP_KERNEL);
5045         }
5046         if (!err)
5047                 err = percpu_counter_init(&sbi->s_dirs_counter,
5048                                           ext4_count_dirs(sb), GFP_KERNEL);
5049         if (!err)
5050                 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
5051                                           GFP_KERNEL);
5052         if (!err)
5053                 err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
5054                                           GFP_KERNEL);
5055         if (!err)
5056                 err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
5057
5058         if (err) {
5059                 ext4_msg(sb, KERN_ERR, "insufficient memory");
5060                 goto failed_mount6;
5061         }
5062
5063         if (ext4_has_feature_flex_bg(sb))
5064                 if (!ext4_fill_flex_info(sb)) {
5065                         ext4_msg(sb, KERN_ERR,
5066                                "unable to initialize "
5067                                "flex_bg meta info!");
5068                         ret = -ENOMEM;
5069                         goto failed_mount6;
5070                 }
5071
5072         err = ext4_register_li_request(sb, first_not_zeroed);
5073         if (err)
5074                 goto failed_mount6;
5075
5076         err = ext4_register_sysfs(sb);
5077         if (err)
5078                 goto failed_mount7;
5079
5080 #ifdef CONFIG_QUOTA
5081         /* Enable quota usage during mount. */
5082         if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
5083                 err = ext4_enable_quotas(sb);
5084                 if (err)
5085                         goto failed_mount8;
5086         }
5087 #endif  /* CONFIG_QUOTA */
5088
5089         /*
5090          * Save the original bdev mapping's wb_err value which could be
5091          * used to detect the metadata async write error.
5092          */
5093         spin_lock_init(&sbi->s_bdev_wb_lock);
5094         errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
5095                                  &sbi->s_bdev_wb_err);
5096         sb->s_bdev->bd_super = sb;
5097         EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
5098         ext4_orphan_cleanup(sb, es);
5099         EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5100         if (needs_recovery) {
5101                 ext4_msg(sb, KERN_INFO, "recovery complete");
5102                 err = ext4_mark_recovery_complete(sb, es);
5103                 if (err)
5104                         goto failed_mount8;
5105         }
5106         if (EXT4_SB(sb)->s_journal) {
5107                 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5108                         descr = " journalled data mode";
5109                 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
5110                         descr = " ordered data mode";
5111                 else
5112                         descr = " writeback data mode";
5113         } else
5114                 descr = "out journal";
5115
5116         if (test_opt(sb, DISCARD)) {
5117                 struct request_queue *q = bdev_get_queue(sb->s_bdev);
5118                 if (!blk_queue_discard(q))
5119                         ext4_msg(sb, KERN_WARNING,
5120                                  "mounting with \"discard\" option, but "
5121                                  "the device does not support discard");
5122         }
5123
5124         if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
5125                 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
5126                          "Opts: %.*s%s%s", descr,
5127                          (int) sizeof(sbi->s_es->s_mount_opts),
5128                          sbi->s_es->s_mount_opts,
5129                          *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
5130
5131         if (es->s_error_count)
5132                 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
5133
5134         /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
5135         ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
5136         ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
5137         ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
5138         atomic_set(&sbi->s_warning_count, 0);
5139         atomic_set(&sbi->s_msg_count, 0);
5140
5141         kfree(orig_data);
5142         return 0;
5143
5144 cantfind_ext4:
5145         if (!silent)
5146                 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5147         goto failed_mount;
5148
5149 failed_mount8:
5150         ext4_unregister_sysfs(sb);
5151         kobject_put(&sbi->s_kobj);
5152 failed_mount7:
5153         ext4_unregister_li_request(sb);
5154 failed_mount6:
5155         ext4_mb_release(sb);
5156         rcu_read_lock();
5157         flex_groups = rcu_dereference(sbi->s_flex_groups);
5158         if (flex_groups) {
5159                 for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5160                         kvfree(flex_groups[i]);
5161                 kvfree(flex_groups);
5162         }
5163         rcu_read_unlock();
5164         percpu_counter_destroy(&sbi->s_freeclusters_counter);
5165         percpu_counter_destroy(&sbi->s_freeinodes_counter);
5166         percpu_counter_destroy(&sbi->s_dirs_counter);
5167         percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5168         percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
5169         percpu_free_rwsem(&sbi->s_writepages_rwsem);
5170 failed_mount5:
5171         ext4_ext_release(sb);
5172         ext4_release_system_zone(sb);
5173 failed_mount4a:
5174         dput(sb->s_root);
5175         sb->s_root = NULL;
5176 failed_mount4:
5177         ext4_msg(sb, KERN_ERR, "mount failed");
5178         if (EXT4_SB(sb)->rsv_conversion_wq)
5179                 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5180 failed_mount_wq:
5181         ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
5182         sbi->s_ea_inode_cache = NULL;
5183
5184         ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
5185         sbi->s_ea_block_cache = NULL;
5186
5187         if (sbi->s_journal) {
5188                 jbd2_journal_destroy(sbi->s_journal);
5189                 sbi->s_journal = NULL;
5190         }
5191 failed_mount3a:
5192         ext4_es_unregister_shrinker(sbi);
5193 failed_mount3:
5194         del_timer_sync(&sbi->s_err_report);
5195         ext4_stop_mmpd(sbi);
5196 failed_mount2:
5197         rcu_read_lock();
5198         group_desc = rcu_dereference(sbi->s_group_desc);
5199         for (i = 0; i < db_count; i++)
5200                 brelse(group_desc[i]);
5201         kvfree(group_desc);
5202         rcu_read_unlock();
5203 failed_mount:
5204         if (sbi->s_chksum_driver)
5205                 crypto_free_shash(sbi->s_chksum_driver);
5206
5207 #ifdef CONFIG_UNICODE
5208         utf8_unload(sb->s_encoding);
5209 #endif
5210
5211 #ifdef CONFIG_QUOTA
5212         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5213                 kfree(get_qf_name(sb, sbi, i));
5214 #endif
5215         fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5216         /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
5217         brelse(bh);
5218         ext4_blkdev_remove(sbi);
5219 out_fail:
5220         sb->s_fs_info = NULL;
5221         kfree(sbi->s_blockgroup_lock);
5222 out_free_base:
5223         kfree(sbi);
5224         kfree(orig_data);
5225         fs_put_dax(dax_dev);
5226         return err ? err : ret;
5227 }
5228
5229 /*
5230  * Setup any per-fs journal parameters now.  We'll do this both on
5231  * initial mount, once the journal has been initialised but before we've
5232  * done any recovery; and again on any subsequent remount.
5233  */
5234 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
5235 {
5236         struct ext4_sb_info *sbi = EXT4_SB(sb);
5237
5238         journal->j_commit_interval = sbi->s_commit_interval;
5239         journal->j_min_batch_time = sbi->s_min_batch_time;
5240         journal->j_max_batch_time = sbi->s_max_batch_time;
5241         ext4_fc_init(sb, journal);
5242
5243         write_lock(&journal->j_state_lock);
5244         if (test_opt(sb, BARRIER))
5245                 journal->j_flags |= JBD2_BARRIER;
5246         else
5247                 journal->j_flags &= ~JBD2_BARRIER;
5248         if (test_opt(sb, DATA_ERR_ABORT))
5249                 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
5250         else
5251                 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
5252         write_unlock(&journal->j_state_lock);
5253 }
5254
5255 static struct inode *ext4_get_journal_inode(struct super_block *sb,
5256                                              unsigned int journal_inum)
5257 {
5258         struct inode *journal_inode;
5259
5260         /*
5261          * Test for the existence of a valid inode on disk.  Bad things
5262          * happen if we iget() an unused inode, as the subsequent iput()
5263          * will try to delete it.
5264          */
5265         journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
5266         if (IS_ERR(journal_inode)) {
5267                 ext4_msg(sb, KERN_ERR, "no journal found");
5268                 return NULL;
5269         }
5270         if (!journal_inode->i_nlink) {
5271                 make_bad_inode(journal_inode);
5272                 iput(journal_inode);
5273                 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
5274                 return NULL;
5275         }
5276
5277         jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
5278                   journal_inode, journal_inode->i_size);
5279         if (!S_ISREG(journal_inode->i_mode)) {
5280                 ext4_msg(sb, KERN_ERR, "invalid journal inode");
5281                 iput(journal_inode);
5282                 return NULL;
5283         }
5284         return journal_inode;
5285 }
5286
5287 static journal_t *ext4_get_journal(struct super_block *sb,
5288                                    unsigned int journal_inum)
5289 {
5290         struct inode *journal_inode;
5291         journal_t *journal;
5292
5293         if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
5294                 return NULL;
5295
5296         journal_inode = ext4_get_journal_inode(sb, journal_inum);
5297         if (!journal_inode)
5298                 return NULL;
5299
5300         journal = jbd2_journal_init_inode(journal_inode);
5301         if (!journal) {
5302                 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
5303                 iput(journal_inode);
5304                 return NULL;
5305         }
5306         journal->j_private = sb;
5307         ext4_init_journal_params(sb, journal);
5308         return journal;
5309 }
5310
/*
 * Open the block device holding an external journal and build a jbd2
 * journal from it.  The journal device's own superblock is validated
 * (ext4 magic, JOURNAL_DEV incompat flag, checksum when metadata_csum
 * is active, and UUID match against the filesystem) before the device
 * is handed to jbd2.  Returns the journal, or NULL on any failure.
 */
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	/* The fs blocksize must be a multiple of the device sector size. */
	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	/* Locate the journal device's superblock (byte offset 1024). */
	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	/* Verify the journal superblock checksum when the feature is set. */
	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	/* The device must be the journal this filesystem was built with. */
	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	/* Shared external journals (nr_users > 1) are not supported. */
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->s_journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}
5404
/*
 * Locate and load the filesystem's journal at mount time, either from
 * the journal inode recorded in @es or from an external journal device.
 * A non-zero @journal_devnum (journal_dev= mount option) overrides the
 * device number stored in the superblock.  On success the journal is
 * installed in EXT4_SB(sb)->s_journal and 0 is returned; on failure a
 * negative errno is returned and the journal is destroyed.
 */
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	/* A mount-option device number takes precedence over the on-disk one. */
	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	/* A filesystem uses either an inode journal or a device journal,
	 * never both. */
	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_get_journal(sb, journal_inum);
		if (!journal)
			return -EINVAL;
	} else {
		journal = ext4_get_dev_journal(sb, journal_dev);
		if (!journal)
			return -EINVAL;
	}

	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	/* A read-only journal device cannot back a read-write mount. */
	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	/* No recovery pending: discard any stale journal contents. */
	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		/*
		 * Save the superblock's error-info region around
		 * jbd2_journal_load() so that a journal replay (which may
		 * rewrite the on-disk superblock) cannot clobber it.
		 */
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	/* Move any error recorded in the journal into the superblock. */
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	/* Persist a changed external-journal device number, if writable. */
	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}
5518
/*
 * Write the in-memory superblock back to disk.  Refreshes the write
 * time, lifetime write statistics and free cluster/inode counts in the
 * on-disk superblock, recomputes its checksum and marks the buffer
 * dirty.  If @sync is set the buffer is written synchronously (with
 * REQ_FUA when barriers are enabled) and the write result is returned;
 * otherwise the dirty buffer is left for writeback.
 */
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh)
		return -EINVAL;
	if (block_device_ejected(sb))
		return -ENODEV;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		ext4_update_tstamp(es, s_wtime);
	/* Refresh the lifetime kilobytes-written counter from the
	 * partition write statistics, when a partition is available. */
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part,
					     sectors[STAT_WRITE]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	/* NOTE(review): for a sync write the buffer is locked across the
	 * error-state check and mark_buffer_dirty() — presumably to
	 * serialize against concurrent writeback; confirm before changing. */
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}
5591
5592 /*
5593  * Have we just finished recovery?  If so, and if we are mounting (or
5594  * remounting) the filesystem readonly, then we will end up with a
5595  * consistent fs on disk.  Record that fact.
5596  */
5597 static int ext4_mark_recovery_complete(struct super_block *sb,
5598                                        struct ext4_super_block *es)
5599 {
5600         int err;
5601         journal_t *journal = EXT4_SB(sb)->s_journal;
5602
5603         if (!ext4_has_feature_journal(sb)) {
5604                 if (journal != NULL) {
5605                         ext4_error(sb, "Journal got removed while the fs was "
5606                                    "mounted!");
5607                         return -EFSCORRUPTED;
5608                 }
5609                 return 0;
5610         }
5611         jbd2_journal_lock_updates(journal);
5612         err = jbd2_journal_flush(journal);
5613         if (err < 0)
5614                 goto out;
5615
5616         if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
5617                 ext4_clear_feature_journal_needs_recovery(sb);
5618                 ext4_commit_super(sb, 1);
5619         }
5620 out:
5621         jbd2_journal_unlock_updates(journal);
5622         return err;
5623 }
5624
5625 /*
5626  * If we are mounting (or read-write remounting) a filesystem whose journal
5627  * has recorded an error from a previous lifetime, move that error to the
5628  * main filesystem now.
5629  */
5630 static int ext4_clear_journal_err(struct super_block *sb,
5631                                    struct ext4_super_block *es)
5632 {
5633         journal_t *journal;
5634         int j_errno;
5635         const char *errstr;
5636
5637         if (!ext4_has_feature_journal(sb)) {
5638                 ext4_error(sb, "Journal got removed while the fs was mounted!");
5639                 return -EFSCORRUPTED;
5640         }
5641
5642         journal = EXT4_SB(sb)->s_journal;
5643
5644         /*
5645          * Now check for any error status which may have been recorded in the
5646          * journal by a prior ext4_error() or ext4_abort()
5647          */
5648
5649         j_errno = jbd2_journal_errno(journal);
5650         if (j_errno) {
5651                 char nbuf[16];
5652
5653                 errstr = ext4_decode_error(sb, j_errno, nbuf);
5654                 ext4_warning(sb, "Filesystem error recorded "
5655                              "from previous mount: %s", errstr);
5656                 ext4_warning(sb, "Marking fs in need of filesystem check.");
5657
5658                 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
5659                 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
5660                 ext4_commit_super(sb, 1);
5661
5662                 jbd2_journal_clear_err(journal);
5663                 jbd2_journal_update_sb_errno(journal);
5664         }
5665         return 0;
5666 }
5667
5668 /*
5669  * Force the running and committing transactions to commit,
5670  * and wait on the commit.
5671  */
5672 int ext4_force_commit(struct super_block *sb)
5673 {
5674         journal_t *journal;
5675
5676         if (sb_rdonly(sb))
5677                 return 0;
5678
5679         journal = EXT4_SB(sb)->s_journal;
5680         return ext4_journal_force_commit(journal);
5681 }
5682
/*
 * Sync the filesystem.  Flushes the reserved-conversion workqueue and
 * non-journalled dirty quotas, starts (and, if @wait, waits for) a
 * journal commit covering the latest transaction, and issues a
 * block-device cache flush when the commit will not send one itself.
 * Returns 0 or the first error encountered.
 */
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Nothing to do once the filesystem has been forcibly shut down. */
	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	/* No commit sent a flush for us: issue one explicitly. */
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
		if (!ret)
			ret = err;
	}

	return ret;
}
5727
5728 /*
5729  * LVM calls this function before a (read-only) snapshot is created.  This
5730  * gives us a chance to flush the journal completely and mark the fs clean.
5731  *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
5735  */
5736 static int ext4_freeze(struct super_block *sb)
5737 {
5738         int error = 0;
5739         journal_t *journal;
5740
5741         if (sb_rdonly(sb))
5742                 return 0;
5743
5744         journal = EXT4_SB(sb)->s_journal;
5745
5746         if (journal) {
5747                 /* Now we set up the journal barrier. */
5748                 jbd2_journal_lock_updates(journal);
5749
5750                 /*
5751                  * Don't clear the needs_recovery flag if we failed to
5752                  * flush the journal.
5753                  */
5754                 error = jbd2_journal_flush(journal);
5755                 if (error < 0)
5756                         goto out;
5757
5758                 /* Journal blocked and flushed, clear needs_recovery flag. */
5759                 ext4_clear_feature_journal_needs_recovery(sb);
5760         }
5761
5762         error = ext4_commit_super(sb, 1);
5763 out:
5764         if (journal)
5765                 /* we rely on upper layer to stop further updates */
5766                 jbd2_journal_unlock_updates(journal);
5767         return error;
5768 }
5769
5770 /*
5771  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
5772  * flag here, even though the filesystem is not technically dirty yet.
5773  */
5774 static int ext4_unfreeze(struct super_block *sb)
5775 {
5776         if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
5777                 return 0;
5778
5779         if (EXT4_SB(sb)->s_journal) {
5780                 /* Reset the needs_recovery flag before the fs is unlocked. */
5781                 ext4_set_feature_journal_needs_recovery(sb);
5782         }
5783
5784         ext4_commit_super(sb, 1);
5785         return 0;
5786 }
5787
/*
 * Structure to save mount options for ext4_remount's benefit: a snapshot
 * of the sbi tunables that option parsing may modify, so they can be
 * restored if the remount fails part-way through.
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;		/* saved sbi->s_mount_opt */
	unsigned long s_mount_opt2;		/* saved sbi->s_mount_opt2 */
	kuid_t s_resuid;			/* saved sbi->s_resuid */
	kgid_t s_resgid;			/* saved sbi->s_resgid */
	unsigned long s_commit_interval;	/* saved sbi->s_commit_interval */
	u32 s_min_batch_time, s_max_batch_time;	/* saved jbd2 batch tunables */
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;			/* saved sbi->s_jquota_fmt */
	/* kstrdup'd copies of the quota file names; freed on restore */
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
5803
5804 static int ext4_remount(struct super_block *sb, int *flags, char *data)
5805 {
5806         struct ext4_super_block *es;
5807         struct ext4_sb_info *sbi = EXT4_SB(sb);
5808         unsigned long old_sb_flags, vfs_flags;
5809         struct ext4_mount_options old_opts;
5810         int enable_quota = 0;
5811         ext4_group_t g;
5812         unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
5813         int err = 0;
5814 #ifdef CONFIG_QUOTA
5815         int i, j;
5816         char *to_free[EXT4_MAXQUOTAS];
5817 #endif
5818         char *orig_data = kstrdup(data, GFP_KERNEL);
5819
5820         if (data && !orig_data)
5821                 return -ENOMEM;
5822
5823         /* Store the original options */
5824         old_sb_flags = sb->s_flags;
5825         old_opts.s_mount_opt = sbi->s_mount_opt;
5826         old_opts.s_mount_opt2 = sbi->s_mount_opt2;
5827         old_opts.s_resuid = sbi->s_resuid;
5828         old_opts.s_resgid = sbi->s_resgid;
5829         old_opts.s_commit_interval = sbi->s_commit_interval;
5830         old_opts.s_min_batch_time = sbi->s_min_batch_time;
5831         old_opts.s_max_batch_time = sbi->s_max_batch_time;
5832 #ifdef CONFIG_QUOTA
5833         old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
5834         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5835                 if (sbi->s_qf_names[i]) {
5836                         char *qf_name = get_qf_name(sb, sbi, i);
5837
5838                         old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
5839                         if (!old_opts.s_qf_names[i]) {
5840                                 for (j = 0; j < i; j++)
5841                                         kfree(old_opts.s_qf_names[j]);
5842                                 kfree(orig_data);
5843                                 return -ENOMEM;
5844                         }
5845                 } else
5846                         old_opts.s_qf_names[i] = NULL;
5847 #endif
5848         if (sbi->s_journal && sbi->s_journal->j_task->io_context)
5849                 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
5850
5851         /*
5852          * Some options can be enabled by ext4 and/or by VFS mount flag
5853          * either way we need to make sure it matches in both *flags and
5854          * s_flags. Copy those selected flags from *flags to s_flags
5855          */
5856         vfs_flags = SB_LAZYTIME | SB_I_VERSION;
5857         sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
5858
5859         if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
5860                 err = -EINVAL;
5861                 goto restore_opts;
5862         }
5863
5864         if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
5865             test_opt(sb, JOURNAL_CHECKSUM)) {
5866                 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
5867                          "during remount not supported; ignoring");
5868                 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
5869         }
5870
5871         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5872                 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5873                         ext4_msg(sb, KERN_ERR, "can't mount with "
5874                                  "both data=journal and delalloc");
5875                         err = -EINVAL;
5876                         goto restore_opts;
5877                 }
5878                 if (test_opt(sb, DIOREAD_NOLOCK)) {
5879                         ext4_msg(sb, KERN_ERR, "can't mount with "
5880                                  "both data=journal and dioread_nolock");
5881                         err = -EINVAL;
5882                         goto restore_opts;
5883                 }
5884         } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
5885                 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
5886                         ext4_msg(sb, KERN_ERR, "can't mount with "
5887                                 "journal_async_commit in data=ordered mode");
5888                         err = -EINVAL;
5889                         goto restore_opts;
5890                 }
5891         }
5892
5893         if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
5894                 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
5895                 err = -EINVAL;
5896                 goto restore_opts;
5897         }
5898
5899         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5900                 ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
5901
5902         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5903                 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5904
5905         es = sbi->s_es;
5906
5907         if (sbi->s_journal) {
5908                 ext4_init_journal_params(sb, sbi->s_journal);
5909                 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
5910         }
5911
5912         if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
5913                 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
5914                         err = -EROFS;
5915                         goto restore_opts;
5916                 }
5917
5918                 if (*flags & SB_RDONLY) {
5919                         err = sync_filesystem(sb);
5920                         if (err < 0)
5921                                 goto restore_opts;
5922                         err = dquot_suspend(sb, -1);
5923                         if (err < 0)
5924                                 goto restore_opts;
5925
5926                         /*
5927                          * First of all, the unconditional stuff we have to do
5928                          * to disable replay of the journal when we next remount
5929                          */
5930                         sb->s_flags |= SB_RDONLY;
5931
5932                         /*
5933                          * OK, test if we are remounting a valid rw partition
5934                          * readonly, and if so set the rdonly flag and then
5935                          * mark the partition as valid again.
5936                          */
5937                         if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
5938                             (sbi->s_mount_state & EXT4_VALID_FS))
5939                                 es->s_state = cpu_to_le16(sbi->s_mount_state);
5940
5941                         if (sbi->s_journal) {
5942                                 /*
5943                                  * We let remount-ro finish even if marking fs
5944                                  * as clean failed...
5945                                  */
5946                                 ext4_mark_recovery_complete(sb, es);
5947                         }
5948                 } else {
5949                         /* Make sure we can mount this feature set readwrite */
5950                         if (ext4_has_feature_readonly(sb) ||
5951                             !ext4_feature_set_ok(sb, 0)) {
5952                                 err = -EROFS;
5953                                 goto restore_opts;
5954                         }
5955                         /*
5956                          * Make sure the group descriptor checksums
5957                          * are sane.  If they aren't, refuse to remount r/w.
5958                          */
5959                         for (g = 0; g < sbi->s_groups_count; g++) {
5960                                 struct ext4_group_desc *gdp =
5961                                         ext4_get_group_desc(sb, g, NULL);
5962
5963                                 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5964                                         ext4_msg(sb, KERN_ERR,
5965                "ext4_remount: Checksum for group %u failed (%u!=%u)",
5966                 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
5967                                                le16_to_cpu(gdp->bg_checksum));
5968                                         err = -EFSBADCRC;
5969                                         goto restore_opts;
5970                                 }
5971                         }
5972
5973                         /*
5974                          * If we have an unprocessed orphan list hanging
5975                          * around from a previously readonly bdev mount,
5976                          * require a full umount/remount for now.
5977                          */
5978                         if (es->s_last_orphan) {
5979                                 ext4_msg(sb, KERN_WARNING, "Couldn't "
5980                                        "remount RDWR because of unprocessed "
5981                                        "orphan inode list.  Please "
5982                                        "umount/remount instead");
5983                                 err = -EINVAL;
5984                                 goto restore_opts;
5985                         }
5986
5987                         /*
5988                          * Mounting a RDONLY partition read-write, so reread
5989                          * and store the current valid flag.  (It may have
5990                          * been changed by e2fsck since we originally mounted
5991                          * the partition.)
5992                          */
5993                         if (sbi->s_journal) {
5994                                 err = ext4_clear_journal_err(sb, es);
5995                                 if (err)
5996                                         goto restore_opts;
5997                         }
5998                         sbi->s_mount_state = (le16_to_cpu(es->s_state) &
5999                                               ~EXT4_FC_REPLAY);
6000
6001                         err = ext4_setup_super(sb, es, 0);
6002                         if (err)
6003                                 goto restore_opts;
6004
6005                         sb->s_flags &= ~SB_RDONLY;
6006                         if (ext4_has_feature_mmp(sb))
6007                                 if (ext4_multi_mount_protect(sb,
6008                                                 le64_to_cpu(es->s_mmp_block))) {
6009                                         err = -EROFS;
6010                                         goto restore_opts;
6011                                 }
6012                         enable_quota = 1;
6013                 }
6014         }
6015
6016         /*
6017          * Reinitialize lazy itable initialization thread based on
6018          * current settings
6019          */
6020         if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
6021                 ext4_unregister_li_request(sb);
6022         else {
6023                 ext4_group_t first_not_zeroed;
6024                 first_not_zeroed = ext4_has_uninit_itable(sb);
6025                 ext4_register_li_request(sb, first_not_zeroed);
6026         }
6027
6028         /*
6029          * Handle creation of system zone data early because it can fail.
6030          * Releasing of existing data is done when we are sure remount will
6031          * succeed.
6032          */
6033         if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
6034                 err = ext4_setup_system_zone(sb);
6035                 if (err)
6036                         goto restore_opts;
6037         }
6038
6039         if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
6040                 err = ext4_commit_super(sb, 1);
6041                 if (err)
6042                         goto restore_opts;
6043         }
6044
6045 #ifdef CONFIG_QUOTA
6046         /* Release old quota file names */
6047         for (i = 0; i < EXT4_MAXQUOTAS; i++)
6048                 kfree(old_opts.s_qf_names[i]);
6049         if (enable_quota) {
6050                 if (sb_any_quota_suspended(sb))
6051                         dquot_resume(sb, -1);
6052                 else if (ext4_has_feature_quota(sb)) {
6053                         err = ext4_enable_quotas(sb);
6054                         if (err)
6055                                 goto restore_opts;
6056                 }
6057         }
6058 #endif
6059         if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6060                 ext4_release_system_zone(sb);
6061
6062         if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6063                 ext4_stop_mmpd(sbi);
6064
6065         /*
6066          * Some options can be enabled by ext4 and/or by VFS mount flag
6067          * either way we need to make sure it matches in both *flags and
6068          * s_flags. Copy those selected flags from s_flags to *flags
6069          */
6070         *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
6071
6072         ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
6073         kfree(orig_data);
6074         return 0;
6075
6076 restore_opts:
6077         sb->s_flags = old_sb_flags;
6078         sbi->s_mount_opt = old_opts.s_mount_opt;
6079         sbi->s_mount_opt2 = old_opts.s_mount_opt2;
6080         sbi->s_resuid = old_opts.s_resuid;
6081         sbi->s_resgid = old_opts.s_resgid;
6082         sbi->s_commit_interval = old_opts.s_commit_interval;
6083         sbi->s_min_batch_time = old_opts.s_min_batch_time;
6084         sbi->s_max_batch_time = old_opts.s_max_batch_time;
6085         if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6086                 ext4_release_system_zone(sb);
6087 #ifdef CONFIG_QUOTA
6088         sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
6089         for (i = 0; i < EXT4_MAXQUOTAS; i++) {
6090                 to_free[i] = get_qf_name(sb, sbi, i);
6091                 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
6092         }
6093         synchronize_rcu();
6094         for (i = 0; i < EXT4_MAXQUOTAS; i++)
6095                 kfree(to_free[i]);
6096 #endif
6097         if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6098                 ext4_stop_mmpd(sbi);
6099         kfree(orig_data);
6100         return err;
6101 }
6102
6103 #ifdef CONFIG_QUOTA
6104 static int ext4_statfs_project(struct super_block *sb,
6105                                kprojid_t projid, struct kstatfs *buf)
6106 {
6107         struct kqid qid;
6108         struct dquot *dquot;
6109         u64 limit;
6110         u64 curblock;
6111
6112         qid = make_kqid_projid(projid);
6113         dquot = dqget(sb, qid);
6114         if (IS_ERR(dquot))
6115                 return PTR_ERR(dquot);
6116         spin_lock(&dquot->dq_dqb_lock);
6117
6118         limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
6119                              dquot->dq_dqb.dqb_bhardlimit);
6120         limit >>= sb->s_blocksize_bits;
6121
6122         if (limit && buf->f_blocks > limit) {
6123                 curblock = (dquot->dq_dqb.dqb_curspace +
6124                             dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
6125                 buf->f_blocks = limit;
6126                 buf->f_bfree = buf->f_bavail =
6127                         (buf->f_blocks > curblock) ?
6128                          (buf->f_blocks - curblock) : 0;
6129         }
6130
6131         limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
6132                              dquot->dq_dqb.dqb_ihardlimit);
6133         if (limit && buf->f_files > limit) {
6134                 buf->f_files = limit;
6135                 buf->f_ffree =
6136                         (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
6137                          (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
6138         }
6139
6140         spin_unlock(&dquot->dq_dqb_lock);
6141         dqput(dquot);
6142         return 0;
6143 }
6144 #endif
6145
6146 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
6147 {
6148         struct super_block *sb = dentry->d_sb;
6149         struct ext4_sb_info *sbi = EXT4_SB(sb);
6150         struct ext4_super_block *es = sbi->s_es;
6151         ext4_fsblk_t overhead = 0, resv_blocks;
6152         u64 fsid;
6153         s64 bfree;
6154         resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
6155
6156         if (!test_opt(sb, MINIX_DF))
6157                 overhead = sbi->s_overhead;
6158
6159         buf->f_type = EXT4_SUPER_MAGIC;
6160         buf->f_bsize = sb->s_blocksize;
6161         buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
6162         bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
6163                 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
6164         /* prevent underflow in case that few free space is available */
6165         buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
6166         buf->f_bavail = buf->f_bfree -
6167                         (ext4_r_blocks_count(es) + resv_blocks);
6168         if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
6169                 buf->f_bavail = 0;
6170         buf->f_files = le32_to_cpu(es->s_inodes_count);
6171         buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
6172         buf->f_namelen = EXT4_NAME_LEN;
6173         fsid = le64_to_cpup((void *)es->s_uuid) ^
6174                le64_to_cpup((void *)es->s_uuid + sizeof(u64));
6175         buf->f_fsid = u64_to_fsid(fsid);
6176
6177 #ifdef CONFIG_QUOTA
6178         if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
6179             sb_has_quota_limits_enabled(sb, PRJQUOTA))
6180                 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
6181 #endif
6182         return 0;
6183 }
6184
6185
6186 #ifdef CONFIG_QUOTA
6187
6188 /*
6189  * Helper functions so that transaction is started before we acquire dqio_sem
6190  * to keep correct lock ordering of transaction > dqio_sem
6191  */
6192 static inline struct inode *dquot_to_inode(struct dquot *dquot)
6193 {
6194         return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
6195 }
6196
6197 static int ext4_write_dquot(struct dquot *dquot)
6198 {
6199         int ret, err;
6200         handle_t *handle;
6201         struct inode *inode;
6202
6203         inode = dquot_to_inode(dquot);
6204         handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
6205                                     EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
6206         if (IS_ERR(handle))
6207                 return PTR_ERR(handle);
6208         ret = dquot_commit(dquot);
6209         err = ext4_journal_stop(handle);
6210         if (!ret)
6211                 ret = err;
6212         return ret;
6213 }
6214
6215 static int ext4_acquire_dquot(struct dquot *dquot)
6216 {
6217         int ret, err;
6218         handle_t *handle;
6219
6220         handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6221                                     EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
6222         if (IS_ERR(handle))
6223                 return PTR_ERR(handle);
6224         ret = dquot_acquire(dquot);
6225         err = ext4_journal_stop(handle);
6226         if (!ret)
6227                 ret = err;
6228         return ret;
6229 }
6230
6231 static int ext4_release_dquot(struct dquot *dquot)
6232 {
6233         int ret, err;
6234         handle_t *handle;
6235
6236         handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6237                                     EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
6238         if (IS_ERR(handle)) {
6239                 /* Release dquot anyway to avoid endless cycle in dqput() */
6240                 dquot_release(dquot);
6241                 return PTR_ERR(handle);
6242         }
6243         ret = dquot_release(dquot);
6244         err = ext4_journal_stop(handle);
6245         if (!ret)
6246                 ret = err;
6247         return ret;
6248 }
6249
6250 static int ext4_mark_dquot_dirty(struct dquot *dquot)
6251 {
6252         struct super_block *sb = dquot->dq_sb;
6253         struct ext4_sb_info *sbi = EXT4_SB(sb);
6254
6255         /* Are we journaling quotas? */
6256         if (ext4_has_feature_quota(sb) ||
6257             sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
6258                 dquot_mark_dquot_dirty(dquot);
6259                 return ext4_write_dquot(dquot);
6260         } else {
6261                 return dquot_mark_dquot_dirty(dquot);
6262         }
6263 }
6264
6265 static int ext4_write_info(struct super_block *sb, int type)
6266 {
6267         int ret, err;
6268         handle_t *handle;
6269
6270         /* Data block + inode block */
6271         handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
6272         if (IS_ERR(handle))
6273                 return PTR_ERR(handle);
6274         ret = dquot_commit_info(sb, type);
6275         err = ext4_journal_stop(handle);
6276         if (!ret)
6277                 ret = err;
6278         return ret;
6279 }
6280
6281 /*
6282  * Turn on quotas during mount time - we need to find
6283  * the quota file and such...
6284  */
6285 static int ext4_quota_on_mount(struct super_block *sb, int type)
6286 {
6287         return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
6288                                         EXT4_SB(sb)->s_jquota_fmt, type);
6289 }
6290
/*
 * Re-tag the lockdep subclass of @inode's i_data_sem (I_DATA_SEM_QUOTA
 * or I_DATA_SEM_NORMAL) so that quota-file locking is classified
 * separately from ordinary inode locking.
 */
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}
6304
/*
 * Standard function to be called on quota_on: validate the quota file,
 * flush journaled data if needed, then enable quotas via the VFS and
 * protect the quota file from userspace modification.
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (!err) {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;	/* err is still 0: soft failure */
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
		/* Flag update failed: back out the quota_on entirely */
		if (err)
			dquot_quota_off(sb, type);
	}
	if (err)
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	return err;
}
6387
6388 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
6389                              unsigned int flags)
6390 {
6391         int err;
6392         struct inode *qf_inode;
6393         unsigned long qf_inums[EXT4_MAXQUOTAS] = {
6394                 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
6395                 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
6396                 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
6397         };
6398
6399         BUG_ON(!ext4_has_feature_quota(sb));
6400
6401         if (!qf_inums[type])
6402                 return -EPERM;
6403
6404         qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
6405         if (IS_ERR(qf_inode)) {
6406                 ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
6407                 return PTR_ERR(qf_inode);
6408         }
6409
6410         /* Don't account quota for quota files to avoid recursion */
6411         qf_inode->i_flags |= S_NOQUOTA;
6412         lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
6413         err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
6414         if (err)
6415                 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
6416         iput(qf_inode);
6417
6418         return err;
6419 }
6420
6421 /* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	/* Quota file inode numbers recorded in the superblock, by type */
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	/* Whether limit enforcement was requested per type via mount opts */
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				/*
				 * Roll back the types enabled so far.  Grab a
				 * reference to each quota inode first so the
				 * lockdep class can be reset after
				 * dquot_quota_off() drops its own reference.
				 */
				for (type--; type >= 0; type--) {
					struct inode *inode;

					inode = sb_dqopt(sb)->files[type];
					if (inode)
						inode = igrab(inode);
					dquot_quota_off(sb, type);
					if (inode) {
						lockdep_set_quota_inode(inode,
							I_DATA_SEM_NORMAL);
						iput(inode);
					}
				}

				return err;
			}
		}
	}
	return 0;
}
6467
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	/* No quota file, or it is being torn down?  Plain VFS turn-off. */
	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	/* Hidden (feature-based) quota inodes keep their flags untouched */
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	/* Drop the NOATIME/IMMUTABLE protection set by ext4_quota_on() */
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	/* Quota is off: i_data_sem returns to its normal lockdep class */
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
6511
6512 /* Read data from quotafile - avoid pagecache and such because we cannot afford
6513  * acquiring the locks... As quota files are never truncated and quota code
6514  * itself serializes the operations (and no one else should touch the files)
6515  * we don't have to be afraid of races */
6516 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
6517                                size_t len, loff_t off)
6518 {
6519         struct inode *inode = sb_dqopt(sb)->files[type];
6520         ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6521         int offset = off & (sb->s_blocksize - 1);
6522         int tocopy;
6523         size_t toread;
6524         struct buffer_head *bh;
6525         loff_t i_size = i_size_read(inode);
6526
6527         if (off > i_size)
6528                 return 0;
6529         if (off+len > i_size)
6530                 len = i_size-off;
6531         toread = len;
6532         while (toread > 0) {
6533                 tocopy = sb->s_blocksize - offset < toread ?
6534                                 sb->s_blocksize - offset : toread;
6535                 bh = ext4_bread(NULL, inode, blk, 0);
6536                 if (IS_ERR(bh))
6537                         return PTR_ERR(bh);
6538                 if (!bh)        /* A hole? */
6539                         memset(data, 0, tocopy);
6540                 else
6541                         memcpy(data, bh->b_data+offset, tocopy);
6542                 brelse(bh);
6543                 offset = 0;
6544                 toread -= tocopy;
6545                 data += tocopy;
6546                 blk++;
6547         }
6548         return len;
6549 }
6550
6551 /* Write to quotafile (we know the transaction is already started and has
6552  * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	/* Quota writes must run inside an already-started transaction */
	if (!handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	/* Map (allocating if needed) the single block covered by the write,
	 * retrying on transient ENOSPC */
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	/* Extend the quota file if the write went past the old EOF */
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
	}
	return err ? err : len;
}
6612 #endif
6613
/* VFS mount callback: ext4 lives on a block device, so defer to
 * mount_bdev() with ext4_fill_super() doing the real work. */
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
6619
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
/* The ext2 driver is not built but CONFIG_EXT4_USE_FOR_EXT2 is set:
 * register an additional "ext2" filesystem type served by ext4.
 * Registration failure is non-fatal, just logged. */
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

/* Return 1 if this filesystem's feature set is acceptable for the ext2
 * personality: no unknown-to-ext2 incompat features, and (for r/w
 * mounts) no unknown-to-ext2 ro-compat features either. */
static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
/* A real ext2 driver is built; ext4 never claims ext2 mounts. */
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif
6649
6650 static inline void register_as_ext3(void)
6651 {
6652         int err = register_filesystem(&ext3_fs_type);
6653         if (err)
6654                 printk(KERN_WARNING
6655                        "EXT4-fs: Unable to register as ext3 (%d)\n", err);
6656 }
6657
/* Undo register_as_ext3(). */
static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}
6662
/*
 * Return 1 if @sb uses only features an ext3 driver would understand,
 * i.e. it is safe to drive this filesystem through the ext3 personality.
 */
static inline int ext3_feature_set_ok(struct super_block *sb)
{
	/* ext3 requires a journal and no unknown incompat features. */
	if (ext4_has_unknown_ext3_incompat_features(sb) ||
	    !ext4_has_feature_journal(sb))
		return 0;
	/* Unknown ro-compat features are tolerable on a read-only mount. */
	return sb_rdonly(sb) || !ext4_has_unknown_ext3_ro_compat_features(sb);
}
6675
/*
 * The canonical "ext4" filesystem type.  mount_bdev() (via ext4_mount)
 * handles device lookup; kill_block_super tears the superblock down on
 * umount.  FS_REQUIRES_DEV: this filesystem lives on a block device.
 */
static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");
6684
/*
 * Shared across all ext4 file systems: an array of EXT4_WQ_HASH_SZ wait
 * queue heads, each initialized in ext4_init_fs().  Presumably hashed by
 * inode by its users elsewhere in ext4 — confirm at the lookup sites.
 */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
6687
6688 static int __init ext4_init_fs(void)
6689 {
6690         int i, err;
6691
6692         ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
6693         ext4_li_info = NULL;
6694         mutex_init(&ext4_li_mtx);
6695
6696         /* Build-time check for flags consistency */
6697         ext4_check_flag_values();
6698
6699         for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
6700                 init_waitqueue_head(&ext4__ioend_wq[i]);
6701
6702         err = ext4_init_es();
6703         if (err)
6704                 return err;
6705
6706         err = ext4_init_pending();
6707         if (err)
6708                 goto out7;
6709
6710         err = ext4_init_post_read_processing();
6711         if (err)
6712                 goto out6;
6713
6714         err = ext4_init_pageio();
6715         if (err)
6716                 goto out5;
6717
6718         err = ext4_init_system_zone();
6719         if (err)
6720                 goto out4;
6721
6722         err = ext4_init_sysfs();
6723         if (err)
6724                 goto out3;
6725
6726         err = ext4_init_mballoc();
6727         if (err)
6728                 goto out2;
6729         err = init_inodecache();
6730         if (err)
6731                 goto out1;
6732
6733         err = ext4_fc_init_dentry_cache();
6734         if (err)
6735                 goto out05;
6736
6737         register_as_ext3();
6738         register_as_ext2();
6739         err = register_filesystem(&ext4_fs_type);
6740         if (err)
6741                 goto out;
6742
6743         return 0;
6744 out:
6745         unregister_as_ext2();
6746         unregister_as_ext3();
6747         ext4_fc_destroy_dentry_cache();
6748 out05:
6749         destroy_inodecache();
6750 out1:
6751         ext4_exit_mballoc();
6752 out2:
6753         ext4_exit_sysfs();
6754 out3:
6755         ext4_exit_system_zone();
6756 out4:
6757         ext4_exit_pageio();
6758 out5:
6759         ext4_exit_post_read_processing();
6760 out6:
6761         ext4_exit_pending();
6762 out7:
6763         ext4_exit_es();
6764
6765         return err;
6766 }
6767
/*
 * Module exit: tear everything down, essentially mirroring the success
 * path of ext4_init_fs() in reverse.  The lazy-init thread is stopped
 * first so no background work races with the teardown below.
 */
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	ext4_fc_destroy_dentry_cache();
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}
6784
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
/* Ask userspace module loaders to load crc32c before ext4 (used for
 * metadata checksumming). */
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)