1 // SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/ioctl.c
 *
 * Copyright (C) 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 */
12 #include <linux/capability.h>
13 #include <linux/time.h>
14 #include <linux/compat.h>
15 #include <linux/mount.h>
16 #include <linux/file.h>
17 #include <linux/quotaops.h>
18 #include <linux/random.h>
19 #include <linux/uaccess.h>
20 #include <linux/delay.h>
21 #include <linux/iversion.h>
22 #include <linux/fileattr.h>
23 #include <linux/uuid.h>
24 #include "ext4_jbd2.h"
26 #include <linux/fsmap.h>
28 #include <trace/events/ext4.h>
/*
 * Callback applied to an on-disk superblock (primary or backup) by the
 * ext4_update_superblocks_fn() machinery; @arg carries the new value.
 */
typedef void ext4_update_sb_callback(struct ext4_super_block *es,
				     const void *arg);
34 * Superblock modification callback function for changing file system
37 static void ext4_sb_setlabel(struct ext4_super_block *es, const void *arg)
39 /* Sanity check, this should never happen */
40 BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);
42 memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
46 * Superblock modification callback function for changing file system
49 static void ext4_sb_setuuid(struct ext4_super_block *es, const void *arg)
51 memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
55 int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
56 ext4_update_sb_callback func,
60 struct ext4_sb_info *sbi = EXT4_SB(sb);
61 struct buffer_head *bh = sbi->s_sbh;
62 struct ext4_super_block *es = sbi->s_es;
64 trace_ext4_update_sb(sb, bh->b_blocknr, 1);
66 BUFFER_TRACE(bh, "get_write_access");
67 err = ext4_journal_get_write_access(handle, sb,
75 ext4_superblock_csum_set(sb);
78 if (buffer_write_io_error(bh) || !buffer_uptodate(bh)) {
79 ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
80 "superblock detected");
81 clear_buffer_write_io_error(bh);
82 set_buffer_uptodate(bh);
85 err = ext4_handle_dirty_metadata(handle, NULL, bh);
88 err = sync_dirty_buffer(bh);
90 ext4_std_error(sb, err);
95 * Update one backup superblock in the group 'grp' using the callback
96 * function 'func' and argument 'arg'. If the handle is NULL the
97 * modification is not journalled.
99 * Returns: 0 when no modification was done (no superblock in the group)
100 * 1 when the modification was successful
103 static int ext4_update_backup_sb(struct super_block *sb,
104 handle_t *handle, ext4_group_t grp,
105 ext4_update_sb_callback func, const void *arg)
108 ext4_fsblk_t sb_block;
109 struct buffer_head *bh;
110 unsigned long offset = 0;
111 struct ext4_super_block *es;
113 if (!ext4_bg_has_super(sb, grp))
117 * For the group 0 there is always 1k padding, so we have
118 * either adjust offset, or sb_block depending on blocksize
121 sb_block = 1 * EXT4_MIN_BLOCK_SIZE;
122 offset = do_div(sb_block, sb->s_blocksize);
124 sb_block = ext4_group_first_block_no(sb, grp);
128 trace_ext4_update_sb(sb, sb_block, handle ? 1 : 0);
130 bh = ext4_sb_bread(sb, sb_block, 0);
135 BUFFER_TRACE(bh, "get_write_access");
136 err = ext4_journal_get_write_access(handle, sb,
143 es = (struct ext4_super_block *) (bh->b_data + offset);
145 if (ext4_has_metadata_csum(sb) &&
146 es->s_checksum != ext4_superblock_csum(sb, es)) {
147 ext4_msg(sb, KERN_ERR, "Invalid checksum for backup "
148 "superblock %llu", sb_block);
153 if (ext4_has_metadata_csum(sb))
154 es->s_checksum = ext4_superblock_csum(sb, es);
155 set_buffer_uptodate(bh);
162 err = ext4_handle_dirty_metadata(handle, NULL, bh);
166 BUFFER_TRACE(bh, "marking dirty");
167 mark_buffer_dirty(bh);
169 err = sync_dirty_buffer(bh);
173 ext4_std_error(sb, err);
174 return (err) ? err : 1;
178 * Update primary and backup superblocks using the provided function
179 * func and argument arg.
181 * Only the primary superblock and at most two backup superblock
182 * modifications are journalled; the rest is modified without journal.
183 * This is safe because e2fsck will re-write them if there is a problem,
184 * and we're very unlikely to ever need more than two backups.
187 int ext4_update_superblocks_fn(struct super_block *sb,
188 ext4_update_sb_callback func,
192 ext4_group_t ngroups;
193 unsigned int three = 1;
194 unsigned int five = 5;
195 unsigned int seven = 7;
197 ext4_group_t grp, primary_grp;
198 struct ext4_sb_info *sbi = EXT4_SB(sb);
201 * We can't update superblocks while the online resize is running
203 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
204 &sbi->s_ext4_flags)) {
205 ext4_msg(sb, KERN_ERR, "Can't modify superblock while"
206 "performing online resize");
211 * We're only going to update primary superblock and two
212 * backup superblocks in this transaction.
214 handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 3);
215 if (IS_ERR(handle)) {
216 err = PTR_ERR(handle);
220 /* Update primary superblock */
221 err = ext4_update_primary_sb(sb, handle, func, arg);
223 ext4_msg(sb, KERN_ERR, "Failed to update primary "
228 primary_grp = ext4_get_group_number(sb, sbi->s_sbh->b_blocknr);
229 ngroups = ext4_get_groups_count(sb);
232 * Update backup superblocks. We have to start from group 0
233 * because it might not be where the primary superblock is
234 * if the fs is mounted with -o sb=<backup_sb_block>
238 while (grp < ngroups) {
239 /* Skip primary superblock */
240 if (grp == primary_grp)
243 ret = ext4_update_backup_sb(sb, handle, grp, func, arg);
245 /* Ignore bad checksum; try to update next sb */
246 if (ret == -EFSBADCRC)
253 if (handle && i > 1) {
255 * We're only journalling primary superblock and
256 * two backup superblocks; the rest is not
259 err = ext4_journal_stop(handle);
265 grp = ext4_list_backups(sb, &three, &five, &seven);
270 ret = ext4_journal_stop(handle);
275 clear_bit_unlock(EXT4_FLAGS_RESIZING, &sbi->s_ext4_flags);
276 smp_mb__after_atomic();
277 return err ? err : 0;
/*
 * Swap memory between @a and @b for @len bytes.
 *
 * @a:   pointer to first memory area
 * @b:   pointer to second memory area
 * @len: number of bytes to swap
 */
static void memswap(void *a, void *b, size_t len)
{
	unsigned char *ap, *bp;

	ap = (unsigned char *)a;
	bp = (unsigned char *)b;
	/* Byte-wise exchange; the areas must not overlap */
	while (len-- > 0) {
		swap(*ap, *bp);
		ap++;
		bp++;
	}
}
302 * Swap i_data and associated attributes between @inode1 and @inode2.
303 * This function is used for the primary swap between inode1 and inode2
304 * and also to revert this primary swap in case of errors.
306 * Therefore you have to make sure, that calling this method twice
307 * will revert all changes.
309 * @inode1: pointer to first inode
310 * @inode2: pointer to second inode
312 static void swap_inode_data(struct inode *inode1, struct inode *inode2)
315 struct ext4_inode_info *ei1;
316 struct ext4_inode_info *ei2;
319 ei1 = EXT4_I(inode1);
320 ei2 = EXT4_I(inode2);
322 swap(inode1->i_version, inode2->i_version);
323 swap(inode1->i_atime, inode2->i_atime);
324 swap(inode1->i_mtime, inode2->i_mtime);
326 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
327 tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
328 ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
329 (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
330 ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
331 swap(ei1->i_disksize, ei2->i_disksize);
332 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
333 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
335 isize = i_size_read(inode1);
336 i_size_write(inode1, i_size_read(inode2));
337 i_size_write(inode2, isize);
340 void ext4_reset_inode_seed(struct inode *inode)
342 struct ext4_inode_info *ei = EXT4_I(inode);
343 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
344 __le32 inum = cpu_to_le32(inode->i_ino);
345 __le32 gen = cpu_to_le32(inode->i_generation);
348 if (!ext4_has_metadata_csum(inode->i_sb))
351 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
352 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
356 * Swap the information from the given @inode and the inode
357 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
358 * important fields of the inodes.
360 * @sb: the super block of the filesystem
361 * @mnt_userns: user namespace of the mount the inode was found from
362 * @inode: the inode to swap with EXT4_BOOT_LOADER_INO
365 static long swap_inode_boot_loader(struct super_block *sb,
366 struct user_namespace *mnt_userns,
371 struct inode *inode_bl;
372 struct ext4_inode_info *ei_bl;
373 qsize_t size, size_bl, diff;
375 unsigned short bytes;
377 inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
378 EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
379 if (IS_ERR(inode_bl))
380 return PTR_ERR(inode_bl);
381 ei_bl = EXT4_I(inode_bl);
383 /* Protect orig inodes against a truncate and make sure,
384 * that only 1 swap_inode_boot_loader is running. */
385 lock_two_nondirectories(inode, inode_bl);
387 if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
388 IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
389 (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
390 ext4_has_inline_data(inode)) {
392 goto journal_err_out;
395 if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
396 !inode_owner_or_capable(mnt_userns, inode) ||
397 !capable(CAP_SYS_ADMIN)) {
399 goto journal_err_out;
402 filemap_invalidate_lock(inode->i_mapping);
403 err = filemap_write_and_wait(inode->i_mapping);
407 err = filemap_write_and_wait(inode_bl->i_mapping);
411 /* Wait for all existing dio workers */
412 inode_dio_wait(inode);
413 inode_dio_wait(inode_bl);
415 truncate_inode_pages(&inode->i_data, 0);
416 truncate_inode_pages(&inode_bl->i_data, 0);
418 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
419 if (IS_ERR(handle)) {
423 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
425 /* Protect extent tree against block allocations via delalloc */
426 ext4_double_down_write_data_sem(inode, inode_bl);
428 if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
429 /* this inode has never been used as a BOOT_LOADER */
430 set_nlink(inode_bl, 1);
431 i_uid_write(inode_bl, 0);
432 i_gid_write(inode_bl, 0);
433 inode_bl->i_flags = 0;
435 inode_set_iversion(inode_bl, 1);
436 i_size_write(inode_bl, 0);
437 EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
438 inode_bl->i_mode = S_IFREG;
439 if (ext4_has_feature_extents(sb)) {
440 ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
441 ext4_ext_tree_init(handle, inode_bl);
443 memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
446 err = dquot_initialize(inode);
450 size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
451 size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
452 diff = size - size_bl;
453 swap_inode_data(inode, inode_bl);
455 inode->i_ctime = inode_bl->i_ctime = current_time(inode);
456 inode_inc_iversion(inode);
458 inode->i_generation = get_random_u32();
459 inode_bl->i_generation = get_random_u32();
460 ext4_reset_inode_seed(inode);
461 ext4_reset_inode_seed(inode_bl);
463 ext4_discard_preallocations(inode, 0);
465 err = ext4_mark_inode_dirty(handle, inode);
467 /* No need to update quota information. */
468 ext4_warning(inode->i_sb,
469 "couldn't mark inode #%lu dirty (err %d)",
471 /* Revert all changes: */
472 swap_inode_data(inode, inode_bl);
473 ext4_mark_inode_dirty(handle, inode);
477 blocks = inode_bl->i_blocks;
478 bytes = inode_bl->i_bytes;
479 inode_bl->i_blocks = inode->i_blocks;
480 inode_bl->i_bytes = inode->i_bytes;
481 err = ext4_mark_inode_dirty(handle, inode_bl);
483 /* No need to update quota information. */
484 ext4_warning(inode_bl->i_sb,
485 "couldn't mark inode #%lu dirty (err %d)",
486 inode_bl->i_ino, err);
490 /* Bootloader inode should not be counted into quota information. */
492 dquot_free_space(inode, diff);
494 err = dquot_alloc_space(inode, -1 * diff);
498 /* Revert all changes: */
499 inode_bl->i_blocks = blocks;
500 inode_bl->i_bytes = bytes;
501 swap_inode_data(inode, inode_bl);
502 ext4_mark_inode_dirty(handle, inode);
503 ext4_mark_inode_dirty(handle, inode_bl);
507 ext4_journal_stop(handle);
508 ext4_double_up_write_data_sem(inode, inode_bl);
511 filemap_invalidate_unlock(inode->i_mapping);
513 unlock_two_nondirectories(inode, inode_bl);
519 * If immutable is set and we are not clearing it, we're not allowed to change
520 * anything else in the inode. Don't error out if we're only trying to set
521 * immutable on an immutable file.
523 static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
526 struct ext4_inode_info *ei = EXT4_I(inode);
527 unsigned int oldflags = ei->i_flags;
529 if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
532 if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
534 if (ext4_has_feature_project(inode->i_sb) &&
535 __kprojid_val(ei->i_projid) != new_projid)
541 static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
543 struct ext4_inode_info *ei = EXT4_I(inode);
545 if (S_ISDIR(inode->i_mode))
548 if (test_opt2(inode->i_sb, DAX_NEVER) ||
549 test_opt(inode->i_sb, DAX_ALWAYS))
552 if ((ei->i_flags ^ flags) & EXT4_DAX_FL)
553 d_mark_dontcache(inode);
556 static bool dax_compatible(struct inode *inode, unsigned int oldflags,
559 /* Allow the DAX flag to be changed on inline directories */
560 if (S_ISDIR(inode->i_mode)) {
561 flags &= ~EXT4_INLINE_DATA_FL;
562 oldflags &= ~EXT4_INLINE_DATA_FL;
565 if (flags & EXT4_DAX_FL) {
566 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
567 ext4_test_inode_state(inode,
568 EXT4_STATE_VERITY_IN_PROGRESS)) {
573 if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL))
579 static int ext4_ioctl_setflags(struct inode *inode,
582 struct ext4_inode_info *ei = EXT4_I(inode);
583 handle_t *handle = NULL;
584 int err = -EPERM, migrate = 0;
585 struct ext4_iloc iloc;
586 unsigned int oldflags, mask, i;
587 struct super_block *sb = inode->i_sb;
589 /* Is it quota file? Do not allow user to mess with it */
590 if (ext4_is_quota_file(inode))
593 oldflags = ei->i_flags;
595 * The JOURNAL_DATA flag can only be changed by
596 * the relevant capability.
598 if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
599 if (!capable(CAP_SYS_RESOURCE))
603 if (!dax_compatible(inode, oldflags, flags)) {
608 if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
611 if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
612 if (!ext4_has_feature_casefold(sb)) {
617 if (!S_ISDIR(inode->i_mode)) {
622 if (!ext4_empty_dir(inode)) {
629 * Wait for all pending directio and then flush all the dirty pages
630 * for this file. The flush marks all the pages readonly, so any
631 * subsequent attempt to write to the file (particularly mmap pages)
632 * will come through the filesystem and fail.
634 if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
635 (flags & EXT4_IMMUTABLE_FL)) {
636 inode_dio_wait(inode);
637 err = filemap_write_and_wait(inode->i_mapping);
642 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
643 if (IS_ERR(handle)) {
644 err = PTR_ERR(handle);
648 ext4_handle_sync(handle);
649 err = ext4_reserve_inode_write(handle, inode, &iloc);
653 ext4_dax_dontcache(inode, flags);
655 for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
656 if (!(mask & EXT4_FL_USER_MODIFIABLE))
658 /* These flags get special treatment later */
659 if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
662 ext4_set_inode_flag(inode, i);
664 ext4_clear_inode_flag(inode, i);
667 ext4_set_inode_flags(inode, false);
669 inode->i_ctime = current_time(inode);
670 inode_inc_iversion(inode);
672 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
674 ext4_journal_stop(handle);
678 if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
680 * Changes to the journaling mode can cause unsafe changes to
681 * S_DAX if the inode is DAX
688 err = ext4_change_inode_journal_flag(inode,
689 flags & EXT4_JOURNAL_DATA_FL);
694 if (flags & EXT4_EXTENTS_FL)
695 err = ext4_ext_migrate(inode);
697 err = ext4_ind_migrate(inode);
705 static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
707 struct super_block *sb = inode->i_sb;
708 struct ext4_inode_info *ei = EXT4_I(inode);
712 struct ext4_iloc iloc;
713 struct ext4_inode *raw_inode;
714 struct dquot *transfer_to[MAXQUOTAS] = { };
716 if (!ext4_has_feature_project(sb)) {
717 if (projid != EXT4_DEF_PROJID)
723 if (EXT4_INODE_SIZE(sb) <= EXT4_GOOD_OLD_INODE_SIZE)
726 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
728 if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
732 /* Is it quota file? Do not allow user to mess with it */
733 if (ext4_is_quota_file(inode))
736 err = dquot_initialize(inode);
740 err = ext4_get_inode_loc(inode, &iloc);
744 raw_inode = ext4_raw_inode(&iloc);
745 if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
746 err = ext4_expand_extra_isize(inode,
747 EXT4_SB(sb)->s_want_extra_isize,
755 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
756 EXT4_QUOTA_INIT_BLOCKS(sb) +
757 EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
759 return PTR_ERR(handle);
761 err = ext4_reserve_inode_write(handle, inode, &iloc);
765 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
766 if (!IS_ERR(transfer_to[PRJQUOTA])) {
768 /* __dquot_transfer() calls back ext4_get_inode_usage() which
769 * counts xattr inode references.
771 down_read(&EXT4_I(inode)->xattr_sem);
772 err = __dquot_transfer(inode, transfer_to);
773 up_read(&EXT4_I(inode)->xattr_sem);
774 dqput(transfer_to[PRJQUOTA]);
779 EXT4_I(inode)->i_projid = kprojid;
780 inode->i_ctime = current_time(inode);
781 inode_inc_iversion(inode);
783 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
787 ext4_journal_stop(handle);
791 static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
793 if (projid != EXT4_DEF_PROJID)
799 static int ext4_shutdown(struct super_block *sb, unsigned long arg)
801 struct ext4_sb_info *sbi = EXT4_SB(sb);
805 if (!capable(CAP_SYS_ADMIN))
808 if (get_user(flags, (__u32 __user *)arg))
811 if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH)
814 if (ext4_forced_shutdown(sbi))
817 ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags);
818 trace_ext4_shutdown(sb, flags);
821 case EXT4_GOING_FLAGS_DEFAULT:
822 ret = freeze_bdev(sb->s_bdev);
825 set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
826 thaw_bdev(sb->s_bdev);
828 case EXT4_GOING_FLAGS_LOGFLUSH:
829 set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
830 if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
831 (void) ext4_force_commit(sb);
832 jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
835 case EXT4_GOING_FLAGS_NOLOGFLUSH:
836 set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
837 if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
838 jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
843 clear_opt(sb, DISCARD);
847 struct getfsmap_info {
848 struct super_block *gi_sb;
849 struct fsmap_head __user *gi_data;
854 static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
856 struct getfsmap_info *info = priv;
859 trace_ext4_getfsmap_mapping(info->gi_sb, xfm);
861 info->gi_last_flags = xfm->fmr_flags;
862 ext4_fsmap_from_internal(info->gi_sb, &fm, xfm);
863 if (copy_to_user(&info->gi_data->fmh_recs[info->gi_idx++], &fm,
864 sizeof(struct fsmap)))
870 static int ext4_ioc_getfsmap(struct super_block *sb,
871 struct fsmap_head __user *arg)
873 struct getfsmap_info info = { NULL };
874 struct ext4_fsmap_head xhead = {0};
875 struct fsmap_head head;
876 bool aborted = false;
879 if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
881 if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
882 memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
883 sizeof(head.fmh_keys[0].fmr_reserved)) ||
884 memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
885 sizeof(head.fmh_keys[1].fmr_reserved)))
888 * ext4 doesn't report file extents at all, so the only valid
889 * file offsets are the magic ones (all zeroes or all ones).
891 if (head.fmh_keys[0].fmr_offset ||
892 (head.fmh_keys[1].fmr_offset != 0 &&
893 head.fmh_keys[1].fmr_offset != -1ULL))
896 xhead.fmh_iflags = head.fmh_iflags;
897 xhead.fmh_count = head.fmh_count;
898 ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]);
899 ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]);
901 trace_ext4_getfsmap_low_key(sb, &xhead.fmh_keys[0]);
902 trace_ext4_getfsmap_high_key(sb, &xhead.fmh_keys[1]);
906 error = ext4_getfsmap(sb, &xhead, ext4_getfsmap_format, &info);
907 if (error == EXT4_QUERY_RANGE_ABORT)
912 /* If we didn't abort, set the "last" flag in the last fmx */
913 if (!aborted && info.gi_idx) {
914 info.gi_last_flags |= FMR_OF_LAST;
915 if (copy_to_user(&info.gi_data->fmh_recs[info.gi_idx - 1].fmr_flags,
917 sizeof(info.gi_last_flags)))
921 /* copy back header */
922 head.fmh_entries = xhead.fmh_entries;
923 head.fmh_oflags = xhead.fmh_oflags;
924 if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
930 static long ext4_ioctl_group_add(struct file *file,
931 struct ext4_new_group_data *input)
933 struct super_block *sb = file_inode(file)->i_sb;
936 err = ext4_resize_begin(sb);
940 if (ext4_has_feature_bigalloc(sb)) {
941 ext4_msg(sb, KERN_ERR,
942 "Online resizing not supported with bigalloc");
947 err = mnt_want_write_file(file);
951 err = ext4_group_add(sb, input);
952 if (EXT4_SB(sb)->s_journal) {
953 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
954 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
955 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
959 mnt_drop_write_file(file);
960 if (!err && ext4_has_group_desc_csum(sb) &&
961 test_opt(sb, INIT_INODE_TABLE))
962 err = ext4_register_li_request(sb, input->group);
964 err2 = ext4_resize_end(sb, false);
970 int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa)
972 struct inode *inode = d_inode(dentry);
973 struct ext4_inode_info *ei = EXT4_I(inode);
974 u32 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
976 if (S_ISREG(inode->i_mode))
977 flags &= ~FS_PROJINHERIT_FL;
979 fileattr_fill_flags(fa, flags);
980 if (ext4_has_feature_project(inode->i_sb))
981 fa->fsx_projid = from_kprojid(&init_user_ns, ei->i_projid);
986 int ext4_fileattr_set(struct user_namespace *mnt_userns,
987 struct dentry *dentry, struct fileattr *fa)
989 struct inode *inode = d_inode(dentry);
990 u32 flags = fa->flags;
991 int err = -EOPNOTSUPP;
993 if (flags & ~EXT4_FL_USER_VISIBLE)
997 * chattr(1) grabs flags via GETFLAGS, modifies the result and
998 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
999 * more restrictive than just silently masking off visible but
1000 * not settable flags as we always did.
1002 flags &= EXT4_FL_USER_MODIFIABLE;
1003 if (ext4_mask_flags(inode->i_mode, flags) != flags)
1005 err = ext4_ioctl_check_immutable(inode, fa->fsx_projid, flags);
1008 err = ext4_ioctl_setflags(inode, flags);
1011 err = ext4_ioctl_setproject(inode, fa->fsx_projid);
/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))
1019 static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
1021 struct fiemap fiemap;
1022 struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
1023 struct fiemap_extent_info fieinfo = { 0, };
1024 struct inode *inode = file_inode(filp);
1027 if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
1030 if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
1033 fieinfo.fi_flags = fiemap.fm_flags;
1034 fieinfo.fi_extents_max = fiemap.fm_extent_count;
1035 fieinfo.fi_extents_start = ufiemap->fm_extents;
1037 error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
1039 fiemap.fm_flags = fieinfo.fi_flags;
1040 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
1041 if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
1047 static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
1051 unsigned int flush_flags = 0;
1052 struct super_block *sb = file_inode(filp)->i_sb;
1054 if (copy_from_user(&flags, (__u32 __user *)arg,
1058 if (!capable(CAP_SYS_ADMIN))
1061 /* check for invalid bits set */
1062 if ((flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID) ||
1063 ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
1064 (flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
1067 if (!EXT4_SB(sb)->s_journal)
1070 if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
1071 !bdev_max_discard_sectors(EXT4_SB(sb)->s_journal->j_dev))
1074 if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
1077 if (flags & EXT4_IOC_CHECKPOINT_FLAG_DISCARD)
1078 flush_flags |= JBD2_JOURNAL_FLUSH_DISCARD;
1080 if (flags & EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT) {
1081 flush_flags |= JBD2_JOURNAL_FLUSH_ZEROOUT;
1082 pr_info_ratelimited("warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow");
1085 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1086 err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, flush_flags);
1087 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1092 static int ext4_ioctl_setlabel(struct file *filp, const char __user *user_label)
1096 char new_label[EXT4_LABEL_MAX + 1];
1097 struct super_block *sb = file_inode(filp)->i_sb;
1099 if (!capable(CAP_SYS_ADMIN))
1103 * Copy the maximum length allowed for ext4 label with one more to
1104 * find the required terminating null byte in order to test the
1105 * label length. The on disk label doesn't need to be null terminated.
1107 if (copy_from_user(new_label, user_label, EXT4_LABEL_MAX + 1))
1110 len = strnlen(new_label, EXT4_LABEL_MAX + 1);
1111 if (len > EXT4_LABEL_MAX)
1115 * Clear the buffer after the new label
1117 memset(new_label + len, 0, EXT4_LABEL_MAX - len);
1119 ret = mnt_want_write_file(filp);
1123 ret = ext4_update_superblocks_fn(sb, ext4_sb_setlabel, new_label);
1125 mnt_drop_write_file(filp);
1129 static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label)
1131 char label[EXT4_LABEL_MAX + 1];
1134 * EXT4_LABEL_MAX must always be smaller than FSLABEL_MAX because
1135 * FSLABEL_MAX must include terminating null byte, while s_volume_name
1138 BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);
1140 memset(label, 0, sizeof(label));
1141 lock_buffer(sbi->s_sbh);
1142 strncpy(label, sbi->s_es->s_volume_name, EXT4_LABEL_MAX);
1143 unlock_buffer(sbi->s_sbh);
1145 if (copy_to_user(user_label, label, sizeof(label)))
1150 static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
1151 struct fsuuid __user *ufsuuid)
1153 struct fsuuid fsuuid;
1154 __u8 uuid[UUID_SIZE];
1156 if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
1159 if (fsuuid.fsu_len == 0) {
1160 fsuuid.fsu_len = UUID_SIZE;
1161 if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len,
1162 sizeof(fsuuid.fsu_len)))
1167 if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
1170 lock_buffer(sbi->s_sbh);
1171 memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
1172 unlock_buffer(sbi->s_sbh);
1174 fsuuid.fsu_len = UUID_SIZE;
1175 if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) ||
1176 copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
1181 static int ext4_ioctl_setuuid(struct file *filp,
1182 const struct fsuuid __user *ufsuuid)
1185 struct super_block *sb = file_inode(filp)->i_sb;
1186 struct fsuuid fsuuid;
1187 __u8 uuid[UUID_SIZE];
1189 if (!capable(CAP_SYS_ADMIN))
1193 * If any checksums (group descriptors or metadata) are being used
1194 * then the checksum seed feature is required to change the UUID.
1196 if (((ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb))
1197 && !ext4_has_feature_csum_seed(sb))
1198 || ext4_has_feature_stable_inodes(sb))
1201 if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
1204 if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
1207 if (copy_from_user(uuid, &ufsuuid->fsu_uuid[0], UUID_SIZE))
1210 ret = mnt_want_write_file(filp);
1214 ret = ext4_update_superblocks_fn(sb, ext4_sb_setuuid, &uuid);
1215 mnt_drop_write_file(filp);
1220 static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1222 struct inode *inode = file_inode(filp);
1223 struct super_block *sb = inode->i_sb;
1224 struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
1226 ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
1229 case FS_IOC_GETFSMAP:
1230 return ext4_ioc_getfsmap(sb, (void __user *)arg);
1231 case EXT4_IOC_GETVERSION:
1232 case EXT4_IOC_GETVERSION_OLD:
1233 return put_user(inode->i_generation, (int __user *) arg);
1234 case EXT4_IOC_SETVERSION:
1235 case EXT4_IOC_SETVERSION_OLD: {
1237 struct ext4_iloc iloc;
1241 if (!inode_owner_or_capable(mnt_userns, inode))
1244 if (ext4_has_metadata_csum(inode->i_sb)) {
1245 ext4_warning(sb, "Setting inode version is not "
1246 "supported with metadata_csum enabled.");
1250 err = mnt_want_write_file(filp);
1253 if (get_user(generation, (int __user *) arg)) {
1255 goto setversion_out;
1259 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
1260 if (IS_ERR(handle)) {
1261 err = PTR_ERR(handle);
1264 err = ext4_reserve_inode_write(handle, inode, &iloc);
1266 inode->i_ctime = current_time(inode);
1267 inode_inc_iversion(inode);
1268 inode->i_generation = generation;
1269 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
1271 ext4_journal_stop(handle);
1274 inode_unlock(inode);
1276 mnt_drop_write_file(filp);
1279 case EXT4_IOC_GROUP_EXTEND: {
1280 ext4_fsblk_t n_blocks_count;
1283 err = ext4_resize_begin(sb);
1287 if (get_user(n_blocks_count, (__u32 __user *)arg)) {
1289 goto group_extend_out;
1292 if (ext4_has_feature_bigalloc(sb)) {
1293 ext4_msg(sb, KERN_ERR,
1294 "Online resizing not supported with bigalloc");
1296 goto group_extend_out;
1299 err = mnt_want_write_file(filp);
1301 goto group_extend_out;
1303 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
1304 if (EXT4_SB(sb)->s_journal) {
1305 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1306 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
1307 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1311 mnt_drop_write_file(filp);
1313 err2 = ext4_resize_end(sb, false);
1319 case EXT4_IOC_MOVE_EXT: {
1320 struct move_extent me;
1324 if (!(filp->f_mode & FMODE_READ) ||
1325 !(filp->f_mode & FMODE_WRITE))
1328 if (copy_from_user(&me,
1329 (struct move_extent __user *)arg, sizeof(me)))
1333 donor = fdget(me.donor_fd);
1337 if (!(donor.file->f_mode & FMODE_WRITE)) {
1342 if (ext4_has_feature_bigalloc(sb)) {
1343 ext4_msg(sb, KERN_ERR,
1344 "Online defrag not supported with bigalloc");
1347 } else if (IS_DAX(inode)) {
1348 ext4_msg(sb, KERN_ERR,
1349 "Online defrag not supported with DAX");
1354 err = mnt_want_write_file(filp);
1358 err = ext4_move_extents(filp, donor.file, me.orig_start,
1359 me.donor_start, me.len, &me.moved_len);
1360 mnt_drop_write_file(filp);
1362 if (copy_to_user((struct move_extent __user *)arg,
1370 case EXT4_IOC_GROUP_ADD: {
1371 struct ext4_new_group_data input;
1373 if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
1377 return ext4_ioctl_group_add(filp, &input);
1380 case EXT4_IOC_MIGRATE:
1383 if (!inode_owner_or_capable(mnt_userns, inode))
1386 err = mnt_want_write_file(filp);
1390 * inode_mutex prevent write and truncate on the file.
1391 * Read still goes through. We take i_data_sem in
1392 * ext4_ext_swap_inode_data before we switch the
1393 * inode format to prevent read.
1395 inode_lock((inode));
1396 err = ext4_ext_migrate(inode);
1397 inode_unlock((inode));
1398 mnt_drop_write_file(filp);
1402 case EXT4_IOC_ALLOC_DA_BLKS:
1405 if (!inode_owner_or_capable(mnt_userns, inode))
1408 err = mnt_want_write_file(filp);
1411 err = ext4_alloc_da_blocks(inode);
1412 mnt_drop_write_file(filp);
1416 case EXT4_IOC_SWAP_BOOT:
1419 if (!(filp->f_mode & FMODE_WRITE))
1421 err = mnt_want_write_file(filp);
1424 err = swap_inode_boot_loader(sb, mnt_userns, inode);
1425 mnt_drop_write_file(filp);
1429 case EXT4_IOC_RESIZE_FS: {
1430 ext4_fsblk_t n_blocks_count;
1431 int err = 0, err2 = 0;
1432 ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;
1434 if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
1439 err = ext4_resize_begin(sb);
1443 err = mnt_want_write_file(filp);
1447 err = ext4_resize_fs(sb, n_blocks_count);
1448 if (EXT4_SB(sb)->s_journal) {
1449 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
1450 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1451 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
1452 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1456 mnt_drop_write_file(filp);
1457 if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
1458 ext4_has_group_desc_csum(sb) &&
1459 test_opt(sb, INIT_INODE_TABLE))
1460 err = ext4_register_li_request(sb, o_group);
1463 err2 = ext4_resize_end(sb, true);
1471 struct fstrim_range range;
1474 if (!capable(CAP_SYS_ADMIN))
1477 if (!bdev_max_discard_sectors(sb->s_bdev))
1481 * We haven't replayed the journal, so we cannot use our
1482 * block-bitmap-guided storage zapping commands.
1484 if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
1487 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
1491 ret = ext4_trim_fs(sb, &range);
1495 if (copy_to_user((struct fstrim_range __user *)arg, &range,
1501 case EXT4_IOC_PRECACHE_EXTENTS:
1502 return ext4_ext_precache(inode);
1504 case FS_IOC_SET_ENCRYPTION_POLICY:
1505 if (!ext4_has_feature_encrypt(sb))
1507 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
1509 case FS_IOC_GET_ENCRYPTION_PWSALT:
1510 return ext4_ioctl_get_encryption_pwsalt(filp, (void __user *)arg);
1512 case FS_IOC_GET_ENCRYPTION_POLICY:
1513 if (!ext4_has_feature_encrypt(sb))
1515 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
1517 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
1518 if (!ext4_has_feature_encrypt(sb))
1520 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
1522 case FS_IOC_ADD_ENCRYPTION_KEY:
1523 if (!ext4_has_feature_encrypt(sb))
1525 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
1527 case FS_IOC_REMOVE_ENCRYPTION_KEY:
1528 if (!ext4_has_feature_encrypt(sb))
1530 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
1532 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
1533 if (!ext4_has_feature_encrypt(sb))
1535 return fscrypt_ioctl_remove_key_all_users(filp,
1536 (void __user *)arg);
1537 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
1538 if (!ext4_has_feature_encrypt(sb))
1540 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
1542 case FS_IOC_GET_ENCRYPTION_NONCE:
1543 if (!ext4_has_feature_encrypt(sb))
1545 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
1547 case EXT4_IOC_CLEAR_ES_CACHE:
1549 if (!inode_owner_or_capable(mnt_userns, inode))
1551 ext4_clear_inode_es(inode);
1555 case EXT4_IOC_GETSTATE:
1559 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED))
1560 state |= EXT4_STATE_FLAG_EXT_PRECACHED;
1561 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
1562 state |= EXT4_STATE_FLAG_NEW;
1563 if (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
1564 state |= EXT4_STATE_FLAG_NEWENTRY;
1565 if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE))
1566 state |= EXT4_STATE_FLAG_DA_ALLOC_CLOSE;
1568 return put_user(state, (__u32 __user *) arg);
1571 case EXT4_IOC_GET_ES_CACHE:
1572 return ext4_ioctl_get_es_cache(filp, arg);
1574 case EXT4_IOC_SHUTDOWN:
1575 return ext4_shutdown(sb, arg);
1577 case FS_IOC_ENABLE_VERITY:
1578 if (!ext4_has_feature_verity(sb))
1580 return fsverity_ioctl_enable(filp, (const void __user *)arg);
1582 case FS_IOC_MEASURE_VERITY:
1583 if (!ext4_has_feature_verity(sb))
1585 return fsverity_ioctl_measure(filp, (void __user *)arg);
1587 case FS_IOC_READ_VERITY_METADATA:
1588 if (!ext4_has_feature_verity(sb))
1590 return fsverity_ioctl_read_metadata(filp,
1591 (const void __user *)arg);
1593 case EXT4_IOC_CHECKPOINT:
1594 return ext4_ioctl_checkpoint(filp, arg);
1596 case FS_IOC_GETFSLABEL:
1597 return ext4_ioctl_getlabel(EXT4_SB(sb), (void __user *)arg);
1599 case FS_IOC_SETFSLABEL:
1600 return ext4_ioctl_setlabel(filp,
1601 (const void __user *)arg);
1603 case EXT4_IOC_GETFSUUID:
1604 return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg);
1605 case EXT4_IOC_SETFSUUID:
1606 return ext4_ioctl_setuuid(filp, (const void __user *)arg);
/*
 * Native ioctl entry point for ext4 files.  All of the real work is
 * done by the internal dispatcher, __ext4_ioctl(); this wrapper exists
 * so tracing/annotation can be attached at a single public boundary.
 */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return __ext4_ioctl(filp, cmd, arg);
}
1617 #ifdef CONFIG_COMPAT
1618 long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1620 /* These are just misnamed, they actually get/put from/to user an int */
1622 case EXT4_IOC32_GETVERSION:
1623 cmd = EXT4_IOC_GETVERSION;
1625 case EXT4_IOC32_SETVERSION:
1626 cmd = EXT4_IOC_SETVERSION;
1628 case EXT4_IOC32_GROUP_EXTEND:
1629 cmd = EXT4_IOC_GROUP_EXTEND;
1631 case EXT4_IOC32_GETVERSION_OLD:
1632 cmd = EXT4_IOC_GETVERSION_OLD;
1634 case EXT4_IOC32_SETVERSION_OLD:
1635 cmd = EXT4_IOC_SETVERSION_OLD;
1637 case EXT4_IOC32_GETRSVSZ:
1638 cmd = EXT4_IOC_GETRSVSZ;
1640 case EXT4_IOC32_SETRSVSZ:
1641 cmd = EXT4_IOC_SETRSVSZ;
1643 case EXT4_IOC32_GROUP_ADD: {
1644 struct compat_ext4_new_group_input __user *uinput;
1645 struct ext4_new_group_data input;
1648 uinput = compat_ptr(arg);
1649 err = get_user(input.group, &uinput->group);
1650 err |= get_user(input.block_bitmap, &uinput->block_bitmap);
1651 err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
1652 err |= get_user(input.inode_table, &uinput->inode_table);
1653 err |= get_user(input.blocks_count, &uinput->blocks_count);
1654 err |= get_user(input.reserved_blocks,
1655 &uinput->reserved_blocks);
1658 return ext4_ioctl_group_add(file, &input);
1660 case EXT4_IOC_MOVE_EXT:
1661 case EXT4_IOC_RESIZE_FS:
1663 case EXT4_IOC_PRECACHE_EXTENTS:
1664 case FS_IOC_SET_ENCRYPTION_POLICY:
1665 case FS_IOC_GET_ENCRYPTION_PWSALT:
1666 case FS_IOC_GET_ENCRYPTION_POLICY:
1667 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
1668 case FS_IOC_ADD_ENCRYPTION_KEY:
1669 case FS_IOC_REMOVE_ENCRYPTION_KEY:
1670 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
1671 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
1672 case FS_IOC_GET_ENCRYPTION_NONCE:
1673 case EXT4_IOC_SHUTDOWN:
1674 case FS_IOC_GETFSMAP:
1675 case FS_IOC_ENABLE_VERITY:
1676 case FS_IOC_MEASURE_VERITY:
1677 case FS_IOC_READ_VERITY_METADATA:
1678 case EXT4_IOC_CLEAR_ES_CACHE:
1679 case EXT4_IOC_GETSTATE:
1680 case EXT4_IOC_GET_ES_CACHE:
1681 case EXT4_IOC_CHECKPOINT:
1682 case FS_IOC_GETFSLABEL:
1683 case FS_IOC_SETFSLABEL:
1684 case EXT4_IOC_GETFSUUID:
1685 case EXT4_IOC_SETFSUUID:
1688 return -ENOIOCTLCMD;
1690 return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1694 static void set_overhead(struct ext4_super_block *es, const void *arg)
1696 es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
1699 int ext4_update_overhead(struct super_block *sb, bool force)
1701 struct ext4_sb_info *sbi = EXT4_SB(sb);
1706 (sbi->s_overhead == 0 ||
1707 sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters)))
1709 return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);