// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif
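/*
 * Note: because the 32-bit struct above is packed and smaller than the
 * native one, _IOWR() encodes a different ioctl number for it. That number
 * is matched by its own case in btrfs_ioctl(), which converts the 32-bit
 * layout before reusing the native handler.
 */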
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
/* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
{
	if (S_ISDIR(inode->i_mode))
		return flags;
	else if (S_ISREG(inode->i_mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}
/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
{
	struct btrfs_inode *binode = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (binode->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (binode->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (binode->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (binode->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}
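/*
 * set_mask_bits() updates i_flags with a cmpxchg loop, so i_flags bits
 * outside the replaced mask that are set concurrently elsewhere are not
 * lost by the update above.
 */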
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	/* COMPR and NOCOMP on new/old are valid */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

	return 0;
}
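/*
 * For example, requesting FS_COMPR_FL while the inode already has
 * FS_NOCOW_FL set (or vice versa) fails with -EINVAL, since NOCOW writes
 * cannot carry compressed extents.
 */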
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	const char *comp = NULL;
	u32 binode_flags;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
		return -EFAULT;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);
	fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);

	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out_unlock;

	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
		goto out_unlock;

	binode_flags = binode->flags;
	if (fsflags & FS_SYNC_FL)
		binode_flags |= BTRFS_INODE_SYNC;
	else
		binode_flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode_flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode_flags |= BTRFS_INODE_APPEND;
	else
		binode_flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode_flags |= BTRFS_INODE_NODUMP;
	else
		binode_flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode_flags |= BTRFS_INODE_NOATIME;
	else
		binode_flags &= ~BTRFS_INODE_NOATIME;
	if (fsflags & FS_DIRSYNC_FL)
		binode_flags |= BTRFS_INODE_DIRSYNC;
	else
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(inode->i_mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
		} else {
			binode_flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(inode->i_mode)) {
			if (inode->i_size == 0)
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
		} else {
			binode_flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (fsflags & FS_COMPR_FL) {
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
			goto out_unlock;
		}

		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;

		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
	} else {
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	if (comp) {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	binode->flags = binode_flags;
	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

out_end_trans:
	btrfs_end_transaction(trans);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(file);
	return ret;
}
/*
 * Translate btrfs internal inode flags to xflags as expected by the
 * FS_IOC_FSGETXATTR ioctl. Filter only the supported ones, unknown flags are
 * silently dropped.
 */
static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags)
{
	unsigned int xflags = 0;

	if (flags & BTRFS_INODE_APPEND)
		xflags |= FS_XFLAG_APPEND;
	if (flags & BTRFS_INODE_IMMUTABLE)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (flags & BTRFS_INODE_NOATIME)
		xflags |= FS_XFLAG_NOATIME;
	if (flags & BTRFS_INODE_NODUMP)
		xflags |= FS_XFLAG_NODUMP;
	if (flags & BTRFS_INODE_SYNC)
		xflags |= FS_XFLAG_SYNC;

	return xflags;
}
/* Check if @flags are a supported and valid set of FS_XFLAGS_* flags */
static int check_xflags(unsigned int flags)
{
	if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
		      FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
		return -EOPNOTSUPP;
	return 0;
}
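/*
 * Only the xflags that map onto btrfs inode flags are accepted here;
 * fields like fsx_projid and fsx_extsize have no btrfs backing and are
 * rejected by btrfs_ioctl_fssetxattr() below.
 */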
/*
 * Set the xflags from the internal inode flags. The remaining items of fsxattr
 * are zeroed.
 */
static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg)
{
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	struct fsxattr fa;

	simple_fill_fsxattr(&fa, btrfs_inode_flags_to_xflags(binode->flags));
	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;

	return 0;
}
static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	struct fsxattr fa, old_fa;
	unsigned old_flags;
	unsigned old_i_flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	ret = check_xflags(fa.fsx_xflags);
	if (ret)
		return ret;

	if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	old_flags = binode->flags;
	old_i_flags = inode->i_flags;

	simple_fill_fsxattr(&old_fa,
			    btrfs_inode_flags_to_xflags(binode->flags));
	ret = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (ret)
		goto out_unlock;

	if (fa.fsx_xflags & FS_XFLAG_SYNC)
		binode->flags |= BTRFS_INODE_SYNC;
	else
		binode->flags &= ~BTRFS_INODE_SYNC;
	if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE)
		binode->flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fa.fsx_xflags & FS_XFLAG_APPEND)
		binode->flags |= BTRFS_INODE_APPEND;
	else
		binode->flags &= ~BTRFS_INODE_APPEND;
	if (fa.fsx_xflags & FS_XFLAG_NODUMP)
		binode->flags |= BTRFS_INODE_NODUMP;
	else
		binode->flags &= ~BTRFS_INODE_NODUMP;
	if (fa.fsx_xflags & FS_XFLAG_NOATIME)
		binode->flags |= BTRFS_INODE_NOATIME;
	else
		binode->flags &= ~BTRFS_INODE_NOATIME;

	/* 1 item for the inode */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans);

out_unlock:
	if (ret) {
		binode->flags = old_flags;
		inode->i_flags = old_i_flags;
	}

	inode_unlock(inode);
	mnt_drop_write_file(file);

	return ret;
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				       minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
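/*
 * On success btrfs_trim_fs() stores the number of bytes actually discarded
 * back in range.len, which is why the range is copied back to userspace
 * above.
 */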
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}
static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  const char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	uuid_le new_uuid;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(fs_info, &block_rsv);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	uuid_le_gen(&new_uuid);
	memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree without backreferences). Also no need to have
		 * the tree block locked since it is not in any tree at this
		 * point, so no other task can find it and use it.
		 */
		btrfs_free_tree_block(trans, root, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto fail;
	}

	free_extent_buffer(leaf);
	leaf = NULL;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(fs_info, &block_rsv);

	if (async_transid) {
		*async_transid = trans->transid;
		err = btrfs_commit_transaction_async(trans, 1);
		if (err)
			err = btrfs_commit_transaction(trans);
	} else {
		err = btrfs_commit_transaction(trans);
	}
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	kfree(root_item);
	return ret;
}
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;
	bool snapshot_force_cow = false;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running delalloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	atomic_inc(&root->will_be_snapshotted);
	smp_mb__after_atomic();
	/* wait for no snapshot writes */
	wait_event(root->subv_writers->wait,
		   percpu_counter_sum(&root->subv_writers->counter) == 0);

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret)
		goto dec_and_free;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto dec_and_free;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&fs_info->trans_lock);
	if (async_transid) {
		*async_transid = trans->transid;
		ret = btrfs_commit_transaction_async(trans, 1);
		if (ret)
			ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
fail:
	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
dec_and_free:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	if (atomic_dec_and_test(&root->will_be_snapshotted))
		wake_up_var(&root->will_be_snapshotted);
free_pending:
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}
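/*
 * The snapshot itself is not taken here: queueing the pending_snapshot on
 * the transaction and committing lets the transaction commit code create
 * it at a point where the subvolume trees are consistent on disk.
 */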
/* copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(dir);
	return error;
}
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}
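/*
 * Returns 1 if the range around @offset still looks worth defragging,
 * 0 if a large enough extent or delalloc run already covers it.
 */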
/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 len = PAGE_SIZE;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
		lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
		ret = false;

	free_extent_map(next);
	return ret;
}
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
{
	struct extent_map *em;
	int ret = 1;
	bool next_mergeable = true;
	bool prev_mergeable = true;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		ret = 0;
		goto out;
	}

	if (!*defrag_end)
		prev_mergeable = false;

	next_mergeable = defrag_check_next_extent(inode, em);
	/*
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
	 */
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
		ret = 0;
out:
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}
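/*
 * On a "don't defrag" decision, *skip is set to the end of the current
 * extent so the caller can jump past it instead of re-checking every page,
 * and *defrag_end is cleared so the next candidate starts fresh.
 */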
/*
 * it doesn't do much good to defrag one or two pages
 * at a time. This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
				    unsigned long num_pages)
{
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
	u64 page_cnt;
	u64 start = (u64)start_index << PAGE_SHIFT;
	u64 search_start;
	int ret;
	int i;
	int i_done;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *tree;
	struct extent_changeset *data_reserved = NULL;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	file_end = (isize - 1) >> PAGE_SHIFT;
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
			start, page_cnt << PAGE_SHIFT);
	if (ret)
		return ret;
	i_done = 0;
	tree = &BTRFS_I(inode)->io_tree;

	/* step one, lock all the pages */
	for (i = 0; i < page_cnt; i++) {
		struct page *page;
again:
		page = find_or_create_page(inode->i_mapping,
					   start_index + i, mask);
		if (!page)
			break;

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state);
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
			/*
			 * we unlocked the page above, so we need check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
			 page_start, page_end - 1, &cached_state);

	/*
	 * When defragmenting we skip ranges that have holes or inline extents,
	 * (check should_defrag_range()), to avoid unnecessary IO and wasting
	 * space. At btrfs_defrag_file(), we check if a range should be defragged
	 * before locking the inode and then, if it should, we trigger a sync
	 * page cache readahead - we lock the inode only after that to avoid
	 * blocking for too long other tasks that possibly want to operate on
	 * other file ranges. But before we were able to get the inode lock,
	 * some other task may have punched a hole in the range, or we may have
	 * now an inline extent, in which case we should not defrag. So check
	 * for that here, where we have the inode and the range locked, and bail
	 * out if that happened.
	 */
	search_start = page_start;
	while (search_start < page_end) {
		struct extent_map *em;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
				      page_end - search_start, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_unlock_range;
		}
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			/* Ok, 0 means we did not defrag anything */
			ret = 0;
			goto out_unlock_range;
		}
		search_start = extent_map_end(em);
		free_extent_map(em);
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			  EXTENT_DEFRAG, 0, 0, &cached_state);

	if (i_done != page_cnt) {
		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
		spin_unlock(&BTRFS_I(inode)->lock);
		btrfs_delalloc_release_space(inode, data_reserved,
				start, (page_cnt - i_done) << PAGE_SHIFT, true);
	}

	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
	extent_changeset_free(data_reserved);
	return i_done;

out_unlock_range:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state);
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_space(inode, data_reserved,
			start, page_cnt << PAGE_SHIFT, true);
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
	extent_changeset_free(data_reserved);
	return ret;
}
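/*
 * On success this returns the number of pages clustered. The pages are
 * only redirtied here; the actual defragmentation happens later when
 * writeback sees the contiguous dirty range (tagged EXTENT_DEFRAG) and
 * allocates new extents for it.
 */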
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
	unsigned long last_index;
	u64 isize = i_size_read(inode);
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
	unsigned long i;
	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
	unsigned long cluster = max_cluster;
	u64 new_align = ~((u64)SZ_128K - 1);
	struct page **pages = NULL;
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	/*
	 * If we were not given a file, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_SHIFT;
	} else {
		last_index = (isize - 1) >> PAGE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, SZ_64K);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_SHIFT;
	}
	if (!max_to_defrag)
		max_to_defrag = last_index - i + 1;

	/*
	 * make writeback starts from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

	while (i <= last_index && defrag_count < max_to_defrag &&
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & SB_ACTIVE))
			break;

		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
			ret = -EAGAIN;
			break;
		}

		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
					 extent_thresh, &last_len, &skip,
					 &defrag_end, do_compress)){
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
			i = max(i + 1, next);
			continue;
		}

		if (!newer_than) {
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			if (ra)
				page_cache_sync_readahead(inode->i_mapping, ra,
						file, ra_index, cluster);
			ra_index += cluster;
		}

		inode_lock(inode);
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
		} else {
			if (do_compress)
				BTRFS_I(inode)->defrag_compress = compress_type;
			ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		}
		if (ret < 0) {
			inode_unlock(inode);
			goto out_ra;
		}

		defrag_count += ret;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		inode_unlock(inode);

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

			if (ret > 0)
				i += ret;

			newer_off = max(newer_off + 1,
					(u64)i << PAGE_SHIFT);

			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_SHIFT;
			} else {
				break;
			}
		} else {
			if (ret > 0) {
				i += ret;
				last_len += ret << PAGE_SHIFT;
			} else {
				i++;
				last_len = 0;
			}
		}
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if (range->compress_type == BTRFS_COMPRESS_LZO) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
	} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
	}

	ret = defrag_count;

out_ra:
	if (do_compress) {
		inode_lock(inode);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		inode_unlock(inode);
	}
	if (!file)
		kfree(ra);
	kfree(pages);
	return ret;
}
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		mnt_drop_write_file(file);
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
		btrfs_info(fs_info, "resizing devid %llu", devid);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!device) {
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_free;
	}

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_info(fs_info,
			   "resizer unable to apply on readonly device %llu",
		       devid);
		ret = -EPERM;
		goto out_free;
	}

	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_free;
		}
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -EPERM;
		goto out_free;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_free;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_free;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_free;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_free;
	}

	new_size = round_down(new_size, fs_info->sectorsize);

	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_free;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing need to do */

out_free:
	kfree(vol_args);
out:
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	mnt_drop_write_file(file);
	return ret;
}
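/*
 * vol_args->name encodes the request as "[<devid>:]<size>", e.g. "1:max",
 * "3:+2g" or "-4g" (devid defaults to 1). Sizes are parsed by memparse()
 * and may be absolute or relative ("+"/"-" prefix).
 */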
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
				const char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
				     NULL, transid, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;
		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		struct inode *inode = file_inode(file);
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		btrfs_warn(fs_info,
"SNAP_CREATE_V2 ioctl with CREATE_ASYNC is deprecated and will be removed in kernel 5.7");

		ptr = &transid;
	}
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		u64 nums;

		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}

		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
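/*
 * The min/max keys form a single lexicographic (objectid, type, offset)
 * range, matching btrfs_comp_cpu_keys() ordering, not three independent
 * per-field filters.
 */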
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/*
			 * Copy the item, same behavior as above, but reset the
			 * *sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stop the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = fault_in_pages_writeable(ubuf + sk_offset,
				*buf_size - sk_offset);
		if (ret)
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;

	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
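/*
 * Unlike v1, -EOVERFLOW is reported to userspace here together with the
 * buffer size that would have been needed for the oversized item, so
 * callers can retry with a larger buffer.
 */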
/*
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree. and sets path name to 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
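/*
 * The path is assembled backwards, component by component, into the end of
 * the name buffer, then moved to the front once the subvolume root is
 * reached.
 */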
static int btrfs_search_path_in_tree_user(struct inode *inode,
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

		key.objectid = treeid;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		while (1) {
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = btrfs_previous_item(root, path, dirid,
							  BTRFS_INODE_REF_KEY);
				if (ret < 0) {
					goto out;
				} else if (ret > 0) {
					ret = -ENOENT;
					goto out;
				}
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key, slot);

			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
				goto out;
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
				goto out;
			}

			temp_inode = btrfs_iget(sb, &key2, root, NULL);
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
				goto out;
			}
			ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
				goto out;
			}

			if (key.offset == upper_limit.objectid)
				break;
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
				goto out;
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	root = fs_info->tree_root;
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
2675 * Version of ino_lookup ioctl (unprivileged)
2677 * The main differences from ino_lookup ioctl are:
2679 * 1. Read + Exec permission will be checked using inode_permission() during
2680 * path construction. -EACCES will be returned in case of failure.
2681 * 2. Path construction will be stopped at the inode number which corresponds
2682 * to the fd with which this ioctl is called. If the constructed path does not
2683 * exist under the fd's inode, -EACCES will be returned.
2684 * 3. The name of the bottom subvolume is also searched and filled.
2686 static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
2688 struct btrfs_ioctl_ino_lookup_user_args *args;
2689 struct inode *inode;
2692 args = memdup_user(argp, sizeof(*args));
2694 return PTR_ERR(args);
2696 inode = file_inode(file);
2698 if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
2699 BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
2701 * The subvolume does not exist under the fd with which this is called.
2708 ret = btrfs_search_path_in_tree_user(inode, args);
2710 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
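/*
 * A hedged userspace sketch of the unprivileged variant (assumes only the
 * <linux/btrfs.h> UAPI; dirid and treeid would come from a previous
 * BTRFS_IOC_GET_SUBVOL_ROOTREF call). On success, args.path holds the path
 * from the fd's directory down to the parent of the subvolume, and args.name
 * the subvolume's own name:
 *
 *	struct btrfs_ioctl_ino_lookup_user_args args = {
 *		.dirid = dirid,		// directory containing the subvolume
 *		.treeid = treeid,	// id of the subvolume itself
 *	};
 *
 *	if (ioctl(dirfd, BTRFS_IOC_INO_LOOKUP_USER, &args) == 0)
 *		printf("%s%s\n", args.path, args.name);
 */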
2717 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
2718 static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
2720 struct btrfs_ioctl_get_subvol_info_args *subvol_info;
2721 struct btrfs_fs_info *fs_info;
2722 struct btrfs_root *root;
2723 struct btrfs_path *path;
2724 struct btrfs_key key;
2725 struct btrfs_root_item *root_item;
2726 struct btrfs_root_ref *rref;
2727 struct extent_buffer *leaf;
2728 unsigned long item_off;
2729 unsigned long item_len;
2730 struct inode *inode;
2734 path = btrfs_alloc_path();
2738 subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
2740 btrfs_free_path(path);
2744 inode = file_inode(file);
2745 fs_info = BTRFS_I(inode)->root->fs_info;
2747 /* Get root_item of inode's subvolume */
2748 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
2749 key.type = BTRFS_ROOT_ITEM_KEY;
2750 key.offset = (u64)-1;
2751 root = btrfs_read_fs_root_no_name(fs_info, &key);
2753 ret = PTR_ERR(root);
2756 root_item = &root->root_item;
2758 subvol_info->treeid = key.objectid;
2760 subvol_info->generation = btrfs_root_generation(root_item);
2761 subvol_info->flags = btrfs_root_flags(root_item);
2763 memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
2764 memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
2766 memcpy(subvol_info->received_uuid, root_item->received_uuid,
2769 subvol_info->ctransid = btrfs_root_ctransid(root_item);
2770 subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
2771 subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
2773 subvol_info->otransid = btrfs_root_otransid(root_item);
2774 subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
2775 subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
2777 subvol_info->stransid = btrfs_root_stransid(root_item);
2778 subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
2779 subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
2781 subvol_info->rtransid = btrfs_root_rtransid(root_item);
2782 subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
2783 subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
2785 if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
2786 /* Search root tree for ROOT_BACKREF of this subvolume */
2787 root = fs_info->tree_root;
2789 key.type = BTRFS_ROOT_BACKREF_KEY;
2791 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2794 } else if (path->slots[0] >=
2795 btrfs_header_nritems(path->nodes[0])) {
2796 ret = btrfs_next_leaf(root, path);
2799 } else if (ret > 0) {
2805 leaf = path->nodes[0];
2806 slot = path->slots[0];
2807 btrfs_item_key_to_cpu(leaf, &key, slot);
2808 if (key.objectid == subvol_info->treeid &&
2809 key.type == BTRFS_ROOT_BACKREF_KEY) {
2810 subvol_info->parent_id = key.offset;
2812 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2813 subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
2815 item_off = btrfs_item_ptr_offset(leaf, slot)
2816 + sizeof(struct btrfs_root_ref);
2817 item_len = btrfs_item_size_nr(leaf, slot)
2818 - sizeof(struct btrfs_root_ref);
2819 read_extent_buffer(leaf, subvol_info->name,
2820 item_off, item_len);
2827 btrfs_free_path(path);
2829 if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
2833 btrfs_free_path(path);
2834 kzfree(subvol_info);
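/*
 * A short userspace sketch of the ioctl above (assumes only the
 * <linux/btrfs.h> UAPI). Any user who can open the subvolume can read its
 * ROOT_ITEM/ROOT_BACKREF metadata this way, without CAP_SYS_ADMIN:
 *
 *	struct btrfs_ioctl_get_subvol_info_args info;
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_SUBVOL_INFO, &info) == 0)
 *		printf("id %llu gen %llu parent %llu name %s\n",
 *		       info.treeid, info.generation,
 *		       info.parent_id, info.name);
 */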
2839 * Return ROOT_REF information of the subvolume containing this inode,
2840 * except for the subvolume name.
2842 static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
2844 struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
2845 struct btrfs_root_ref *rref;
2846 struct btrfs_root *root;
2847 struct btrfs_path *path;
2848 struct btrfs_key key;
2849 struct extent_buffer *leaf;
2850 struct inode *inode;
2856 path = btrfs_alloc_path();
2860 rootrefs = memdup_user(argp, sizeof(*rootrefs));
2861 if (IS_ERR(rootrefs)) {
2862 btrfs_free_path(path);
2863 return PTR_ERR(rootrefs);
2866 inode = file_inode(file);
2867 root = BTRFS_I(inode)->root->fs_info->tree_root;
2868 objectid = BTRFS_I(inode)->root->root_key.objectid;
2870 key.objectid = objectid;
2871 key.type = BTRFS_ROOT_REF_KEY;
2872 key.offset = rootrefs->min_treeid;
2875 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2878 } else if (path->slots[0] >=
2879 btrfs_header_nritems(path->nodes[0])) {
2880 ret = btrfs_next_leaf(root, path);
2883 } else if (ret > 0) {
2889 leaf = path->nodes[0];
2890 slot = path->slots[0];
2892 btrfs_item_key_to_cpu(leaf, &key, slot);
2893 if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
2898 if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
2903 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2904 rootrefs->rootref[found].treeid = key.offset;
2905 rootrefs->rootref[found].dirid =
2906 btrfs_root_ref_dirid(leaf, rref);
2909 ret = btrfs_next_item(root, path);
2912 } else if (ret > 0) {
2919 btrfs_free_path(path);
2921 if (!ret || ret == -EOVERFLOW) {
2922 rootrefs->num_items = found;
2923 /* update min_treeid for next search */
2925 rootrefs->min_treeid =
2926 rootrefs->rootref[found - 1].treeid + 1;
2927 if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
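/*
 * A userspace sketch of the pagination protocol above (UAPI from
 * <linux/btrfs.h> assumed, error handling trimmed): at most
 * BTRFS_MAX_ROOTREF_BUFFER_NUM entries come back per call, more data is
 * signalled with EOVERFLOW, and min_treeid is already advanced for the
 * next round, so callers simply loop:
 *
 *	struct btrfs_ioctl_get_subvol_rootref_args refs = { .min_treeid = 0 };
 *	int err;
 *
 *	do {
 *		err = ioctl(fd, BTRFS_IOC_GET_SUBVOL_ROOTREF, &refs);
 *		if (err < 0 && errno != EOVERFLOW)
 *			break;
 *		for (int i = 0; i < refs.num_items; i++)
 *			printf("tree %llu dir %llu\n",
 *			       refs.rootref[i].treeid,
 *			       refs.rootref[i].dirid);
 *	} while (err < 0 && errno == EOVERFLOW);
 */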
2936 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2939 struct dentry *parent = file->f_path.dentry;
2940 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2941 struct dentry *dentry;
2942 struct inode *dir = d_inode(parent);
2943 struct inode *inode;
2944 struct btrfs_root *root = BTRFS_I(dir)->root;
2945 struct btrfs_root *dest = NULL;
2946 struct btrfs_ioctl_vol_args *vol_args;
2950 if (!S_ISDIR(dir->i_mode))
2953 vol_args = memdup_user(arg, sizeof(*vol_args));
2954 if (IS_ERR(vol_args))
2955 return PTR_ERR(vol_args);
2957 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2958 namelen = strlen(vol_args->name);
2959 if (strchr(vol_args->name, '/') ||
2960 strncmp(vol_args->name, "..", namelen) == 0) {
2965 err = mnt_want_write_file(file);
2970 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2972 goto out_drop_write;
2973 dentry = lookup_one_len(vol_args->name, parent, namelen);
2974 if (IS_ERR(dentry)) {
2975 err = PTR_ERR(dentry);
2976 goto out_unlock_dir;
2979 if (d_really_is_negative(dentry)) {
2984 inode = d_inode(dentry);
2985 dest = BTRFS_I(inode)->root;
2986 if (!capable(CAP_SYS_ADMIN)) {
2988 * Regular user. Only allow this with a special mount
2989 * option, when the user has write+exec access to the
2990 * subvol root, and when rmdir(2) would have been
2993 * Note that this does _not_ check that the subvol is
2994 * empty or doesn't contain data that we wouldn't
2995 * otherwise be able to delete.
2997 * Users who want to delete empty subvols should try rmdir(2).
3001 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
3005 * Do not allow deletion if the parent dir is the same
3006 * as the dir to be deleted. That means the ioctl
3007 * must be called on the dentry referencing the root
3008 * of the subvol, not a random directory contained
3015 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
3020 /* check if subvolume may be deleted by a user */
3021 err = btrfs_may_delete(dir, dentry, 1);
3025 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
3031 err = btrfs_delete_subvolume(dir, dentry);
3032 inode_unlock(inode);
3034 d_delete_notify(dir, dentry);
3041 mnt_drop_write_file(file);
3047 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
3049 struct inode *inode = file_inode(file);
3050 struct btrfs_root *root = BTRFS_I(inode)->root;
3051 struct btrfs_ioctl_defrag_range_args *range;
3054 ret = mnt_want_write_file(file);
3058 if (btrfs_root_readonly(root)) {
3063 switch (inode->i_mode & S_IFMT) {
3065 if (!capable(CAP_SYS_ADMIN)) {
3069 ret = btrfs_defrag_root(root);
3073 * Note that this does not check the file descriptor for write
3074 * access. This prevents defragmenting executables that are
3075 * running and allows defrag on files open in read-only mode.
3077 if (!capable(CAP_SYS_ADMIN) &&
3078 inode_permission(inode, MAY_WRITE)) {
3083 range = kzalloc(sizeof(*range), GFP_KERNEL);
3090 if (copy_from_user(range, argp,
3096 /* compression requires us to start the IO */
3097 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
3098 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
3099 range->extent_thresh = (u32)-1;
3102 /* the rest are all set to zero by kzalloc */
3103 range->len = (u64)-1;
3105 ret = btrfs_defrag_file(file_inode(file), file,
3106 range, BTRFS_OLDEST_GENERATION, 0);
3115 mnt_drop_write_file(file);
3119 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
3121 struct btrfs_ioctl_vol_args *vol_args;
3124 if (!capable(CAP_SYS_ADMIN))
3127 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
3128 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3130 vol_args = memdup_user(arg, sizeof(*vol_args));
3131 if (IS_ERR(vol_args)) {
3132 ret = PTR_ERR(vol_args);
3136 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3137 ret = btrfs_init_new_device(fs_info, vol_args->name);
3140 btrfs_info(fs_info, "disk added %s", vol_args->name);
3144 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3148 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3150 struct inode *inode = file_inode(file);
3151 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3152 struct btrfs_ioctl_vol_args_v2 *vol_args;
3155 if (!capable(CAP_SYS_ADMIN))
3158 ret = mnt_want_write_file(file);
3162 vol_args = memdup_user(arg, sizeof(*vol_args));
3163 if (IS_ERR(vol_args)) {
3164 ret = PTR_ERR(vol_args);
3168 /* Check for compatibility: reject unknown flags */
3169 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
3174 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3175 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3179 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3180 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
3182 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3183 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3185 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3188 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3189 btrfs_info(fs_info, "device deleted: id %llu",
3192 btrfs_info(fs_info, "device deleted: %s",
3198 mnt_drop_write_file(file);
3202 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3204 struct inode *inode = file_inode(file);
3205 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3206 struct btrfs_ioctl_vol_args *vol_args;
3209 if (!capable(CAP_SYS_ADMIN))
3212 ret = mnt_want_write_file(file);
3216 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3217 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3218 goto out_drop_write;
3221 vol_args = memdup_user(arg, sizeof(*vol_args));
3222 if (IS_ERR(vol_args)) {
3223 ret = PTR_ERR(vol_args);
3227 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3228 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3231 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3234 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3236 mnt_drop_write_file(file);
3241 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3244 struct btrfs_ioctl_fs_info_args *fi_args;
3245 struct btrfs_device *device;
3246 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3249 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
3254 fi_args->num_devices = fs_devices->num_devices;
3256 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3257 if (device->devid > fi_args->max_id)
3258 fi_args->max_id = device->devid;
3262 memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
3263 fi_args->nodesize = fs_info->nodesize;
3264 fi_args->sectorsize = fs_info->sectorsize;
3265 fi_args->clone_alignment = fs_info->sectorsize;
3267 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3274 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3277 struct btrfs_ioctl_dev_info_args *di_args;
3278 struct btrfs_device *dev;
3280 char *s_uuid = NULL;
3282 di_args = memdup_user(arg, sizeof(*di_args));
3283 if (IS_ERR(di_args))
3284 return PTR_ERR(di_args);
3286 if (!btrfs_is_empty_uuid(di_args->uuid))
3287 s_uuid = di_args->uuid;
3290 dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
3298 di_args->devid = dev->devid;
3299 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3300 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3301 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3303 strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path));
3305 di_args->path[0] = '\0';
3309 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3316 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3317 struct inode *inode2, u64 loff2, u64 len)
3319 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3320 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3323 static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3324 struct inode *inode2, u64 loff2, u64 len)
3326 if (inode1 < inode2) {
3327 swap(inode1, inode2);
3329 } else if (inode1 == inode2 && loff2 < loff1) {
3332 lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3333 lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
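/*
 * The swaps above sort the two (inode, offset) pairs into one global order
 * before the extent locks are taken. For example, if task A clones from
 * inode X to inode Y while task B clones from Y to X, both tasks lock the
 * same inode's io_tree first, so an ABBA deadlock between them is
 * impossible; the offset ordering handles ranges within a single inode.
 */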
3336 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
3337 struct inode *dst, u64 dst_loff)
3339 const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3343 * Lock destination range to serialize with concurrent readpages() and
3344 * source range to serialize with relocation.
3346 btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
3347 ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
3348 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3353 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3355 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3356 struct inode *dst, u64 dst_loff)
3359 u64 i, tail_len, chunk_count;
3360 struct btrfs_root *root_dst = BTRFS_I(dst)->root;
3362 spin_lock(&root_dst->root_item_lock);
3363 if (root_dst->send_in_progress) {
3364 btrfs_warn_rl(root_dst->fs_info,
3365 "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
3366 root_dst->root_key.objectid,
3367 root_dst->send_in_progress);
3368 spin_unlock(&root_dst->root_item_lock);
3371 root_dst->dedupe_in_progress++;
3372 spin_unlock(&root_dst->root_item_lock);
3374 tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
3375 chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
3377 for (i = 0; i < chunk_count; i++) {
3378 ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
3383 loff += BTRFS_MAX_DEDUPE_LEN;
3384 dst_loff += BTRFS_MAX_DEDUPE_LEN;
3388 ret = btrfs_extent_same_range(src, loff, tail_len, dst,
3391 spin_lock(&root_dst->root_item_lock);
3392 root_dst->dedupe_in_progress--;
3393 spin_unlock(&root_dst->root_item_lock);
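/*
 * Worked example for the chunking above: a 40 MiB dedupe request gives
 * chunk_count = 40 / 16 = 2 full BTRFS_MAX_DEDUPE_LEN chunks plus
 * tail_len = 40 % 16 = 8 MiB, i.e. three btrfs_extent_same_range() calls,
 * each locking and cloning a window of at most 16 MiB at a time.
 */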
3398 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3399 struct inode *inode,
3405 struct btrfs_root *root = BTRFS_I(inode)->root;
3408 inode_inc_iversion(inode);
3409 if (!no_time_update)
3410 inode->i_mtime = inode->i_ctime = current_time(inode);
3412 * We round up to the block size at eof when determining which
3413 * extents to clone above, but shouldn't round up the file size.
3415 if (endoff > destoff + olen)
3416 endoff = destoff + olen;
3417 if (endoff > inode->i_size)
3418 btrfs_i_size_write(BTRFS_I(inode), endoff);
3420 ret = btrfs_update_inode(trans, root, inode);
3422 btrfs_abort_transaction(trans, ret);
3423 btrfs_end_transaction(trans);
3426 ret = btrfs_end_transaction(trans);
3432 * Make sure we do not end up inserting an inline extent into a file that has
3433 * already other (non-inline) extents. If a file has an inline extent it can
3434 * not have any other extents and the (single) inline extent must start at the
3435 * file offset 0. Failing to respect these rules will lead to file corruption,
3436 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
3438 * We can have extents that have been already written to disk or we can have
3439 * dirty ranges still in delalloc, in which case the extent maps and items are
3440 * created only when we run delalloc, and the delalloc ranges might fall outside
3441 * the range we are currently locking in the inode's io tree. So we check the
3442 * inode's i_size because of that (i_size updates are done while holding the
3443 * i_mutex, which we are holding here).
3444 * We also check to see if the inode has a size not greater than "datal" but has
3445 * extents beyond it, due to a fallocate call with FALLOC_FL_KEEP_SIZE (and we are
3446 * protected against such concurrent fallocate calls by the i_mutex).
3448 * If the file has no extents but a size greater than datal, do not allow the
3449 * copy because we would need to turn the inline extent into a non-inline one (even
3450 * with NO_HOLES enabled). If we find our destination inode only has one inline
3451 * extent, just overwrite it with the source inline extent if its size is less
3452 * than the source extent's size, or we could copy the source inline extent's
3453 * data into the destination inode's inline extent if the latter is larger than the former.
3456 static int clone_copy_inline_extent(struct inode *dst,
3457 struct btrfs_trans_handle *trans,
3458 struct btrfs_path *path,
3459 struct btrfs_key *new_key,
3460 const u64 drop_start,
3466 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3467 struct btrfs_root *root = BTRFS_I(dst)->root;
3468 const u64 aligned_end = ALIGN(new_key->offset + datal,
3469 fs_info->sectorsize);
3471 struct btrfs_key key;
3473 if (new_key->offset > 0)
3476 key.objectid = btrfs_ino(BTRFS_I(dst));
3477 key.type = BTRFS_EXTENT_DATA_KEY;
3479 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3482 } else if (ret > 0) {
3483 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3484 ret = btrfs_next_leaf(root, path);
3488 goto copy_inline_extent;
3490 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3491 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3492 key.type == BTRFS_EXTENT_DATA_KEY) {
3493 ASSERT(key.offset > 0);
3496 } else if (i_size_read(dst) <= datal) {
3497 struct btrfs_file_extent_item *ei;
3501 * If the file size is <= datal, make sure there are no other
3502 * extents following (can happen due to a fallocate call with
3503 * the flag FALLOC_FL_KEEP_SIZE).
3505 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3506 struct btrfs_file_extent_item);
3508 * If it's an inline extent, it cannot have other extents following it.
3511 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3512 BTRFS_FILE_EXTENT_INLINE)
3513 goto copy_inline_extent;
3515 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3516 if (ext_len > aligned_end)
3519 ret = btrfs_next_item(root, path);
3522 } else if (ret == 0) {
3523 btrfs_item_key_to_cpu(path->nodes[0], &key,
3525 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3526 key.type == BTRFS_EXTENT_DATA_KEY)
3533 * We have no extent items, or we have an extent at offset 0 which may
3534 * or may not be inlined. All these cases are dealt with in the same way.
3536 if (i_size_read(dst) > datal) {
3538 * If the destination inode has an inline extent...
3539 * This would require copying the data from the source inline
3540 * extent into the beginning of the destination's inline extent.
3541 * But this is really complex, both extents can be compressed
3542 * or just one of them, which would require decompressing and
3543 * re-compressing data (which could increase the new compressed
3544 * size, not allowing the compressed data to fit anymore in an
3546 * So just don't support this case for now (it should be rare,
3547 * we are not really saving space when cloning inline extents).
3552 btrfs_release_path(path);
3553 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3556 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3561 const u32 start = btrfs_file_extent_calc_inline_size(0);
3563 memmove(inline_data + start, inline_data + start + skip, datal);
3566 write_extent_buffer(path->nodes[0], inline_data,
3567 btrfs_item_ptr_offset(path->nodes[0],
3570 inode_add_bytes(dst, datal);
3571 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
3577 * btrfs_clone() - clone a range from one file's inode to another
3579 * @src: Inode to clone from
3580 * @inode: Inode to clone to
3581 * @off: Offset within source to start clone from
3582 * @olen: Original length, passed by user, of range to clone
3583 * @olen_aligned: Block-aligned value of olen
3584 * @destoff: Offset within @inode to start clone
3585 * @no_time_update: Whether to update mtime/ctime on the target inode
3587 static int btrfs_clone(struct inode *src, struct inode *inode,
3588 const u64 off, const u64 olen, const u64 olen_aligned,
3589 const u64 destoff, int no_time_update)
3591 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3592 struct btrfs_root *root = BTRFS_I(inode)->root;
3593 struct btrfs_path *path = NULL;
3594 struct extent_buffer *leaf;
3595 struct btrfs_trans_handle *trans;
3597 struct btrfs_key key;
3601 const u64 len = olen_aligned;
3602 u64 last_dest_end = destoff;
3605 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3609 path = btrfs_alloc_path();
3615 path->reada = READA_FORWARD;
3617 key.objectid = btrfs_ino(BTRFS_I(src));
3618 key.type = BTRFS_EXTENT_DATA_KEY;
3622 u64 next_key_min_offset = key.offset + 1;
3623 struct btrfs_file_extent_item *extent;
3626 struct btrfs_key new_key;
3627 u64 disko = 0, diskl = 0;
3628 u64 datao = 0, datal = 0;
3633 * note the key will change type as we walk through the tree.
3636 path->leave_spinning = 1;
3637 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3642 * First search, if no extent item that starts at offset off was
3643 * found but the previous item is an extent item, it's possible
3644 * it might overlap our target range, therefore process it.
3646 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3647 btrfs_item_key_to_cpu(path->nodes[0], &key,
3648 path->slots[0] - 1);
3649 if (key.type == BTRFS_EXTENT_DATA_KEY)
3653 nritems = btrfs_header_nritems(path->nodes[0]);
3655 if (path->slots[0] >= nritems) {
3656 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3661 nritems = btrfs_header_nritems(path->nodes[0]);
3663 leaf = path->nodes[0];
3664 slot = path->slots[0];
3666 btrfs_item_key_to_cpu(leaf, &key, slot);
3667 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3668 key.objectid != btrfs_ino(BTRFS_I(src)))
3671 ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
3673 extent = btrfs_item_ptr(leaf, slot,
3674 struct btrfs_file_extent_item);
3675 comp = btrfs_file_extent_compression(leaf, extent);
3676 type = btrfs_file_extent_type(leaf, extent);
3677 if (type == BTRFS_FILE_EXTENT_REG ||
3678 type == BTRFS_FILE_EXTENT_PREALLOC) {
3679 disko = btrfs_file_extent_disk_bytenr(leaf, extent);
3680 diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
3681 datao = btrfs_file_extent_offset(leaf, extent);
3682 datal = btrfs_file_extent_num_bytes(leaf, extent);
3683 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3684 /* Take upper bound, may be compressed */
3685 datal = btrfs_file_extent_ram_bytes(leaf, extent);
3689 * The first search might have left us at an extent item that
3690 * ends before our target range's start; this can happen if we
3691 * have holes and the NO_HOLES feature enabled.
3693 if (key.offset + datal <= off) {
3696 } else if (key.offset >= off + len) {
3699 next_key_min_offset = key.offset + datal;
3700 size = btrfs_item_size_nr(leaf, slot);
3701 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
3704 btrfs_release_path(path);
3705 path->leave_spinning = 0;
3707 memcpy(&new_key, &key, sizeof(new_key));
3708 new_key.objectid = btrfs_ino(BTRFS_I(inode));
3709 if (off <= key.offset)
3710 new_key.offset = key.offset + destoff - off;
3712 new_key.offset = destoff;
3715 * Deal with a hole that doesn't have an extent item that
3716 * represents it (NO_HOLES feature enabled).
3717 * This hole is either in the middle of the cloning range or at
3718 * the beginning (fully overlaps it or partially overlaps it).
3720 if (new_key.offset != last_dest_end)
3721 drop_start = last_dest_end;
3723 drop_start = new_key.offset;
3725 if (type == BTRFS_FILE_EXTENT_REG ||
3726 type == BTRFS_FILE_EXTENT_PREALLOC) {
3727 struct btrfs_clone_extent_info clone_info;
3730 * a | --- range to clone ---| b
3731 * | ------------- extent ------------- |
3734 /* Subtract range b */
3735 if (key.offset + datal > off + len)
3736 datal = off + len - key.offset;
3738 /* Subtract range a */
3739 if (off > key.offset) {
3740 datao += off - key.offset;
3741 datal -= off - key.offset;
3744 clone_info.disk_offset = disko;
3745 clone_info.disk_len = diskl;
3746 clone_info.data_offset = datao;
3747 clone_info.data_len = datal;
3748 clone_info.file_offset = new_key.offset;
3749 clone_info.extent_buf = buf;
3750 clone_info.item_size = size;
3751 ret = btrfs_punch_hole_range(inode, path,
3753 new_key.offset + datal - 1,
3754 &clone_info, &trans);
3757 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3761 if (off > key.offset) {
3762 skip = off - key.offset;
3763 new_key.offset += skip;
3766 if (key.offset + datal > off + len)
3767 trim = key.offset + datal - (off + len);
3769 if (comp && (skip || trim)) {
3773 size -= skip + trim;
3774 datal -= skip + trim;
3777 * If our extent is inline, we know we will drop or
3778 * adjust at most 1 extent item in the destination root.
3780 * 1 - adjusting old extent (we may have to split it)
3781 * 1 - add new extent
3784 trans = btrfs_start_transaction(root, 3);
3785 if (IS_ERR(trans)) {
3786 ret = PTR_ERR(trans);
3790 ret = clone_copy_inline_extent(inode, trans, path,
3791 &new_key, drop_start,
3792 datal, skip, size, buf);
3794 if (ret != -EOPNOTSUPP)
3795 btrfs_abort_transaction(trans, ret);
3796 btrfs_end_transaction(trans);
3801 btrfs_release_path(path);
3803 last_dest_end = ALIGN(new_key.offset + datal,
3804 fs_info->sectorsize);
3805 ret = clone_finish_inode_update(trans, inode, last_dest_end,
3806 destoff, olen, no_time_update);
3809 if (new_key.offset + datal >= destoff + len)
3812 btrfs_release_path(path);
3813 key.offset = next_key_min_offset;
3815 if (fatal_signal_pending(current)) {
3824 if (last_dest_end < destoff + len) {
3826 * We have an implicit hole that fully or partially overlaps our
3827 * cloning range at its end. This means that we either have the
3828 * NO_HOLES feature enabled or the implicit hole happened due to
3829 * mixing buffered and direct IO writes against this file.
3831 btrfs_release_path(path);
3832 path->leave_spinning = 0;
3834 ret = btrfs_punch_hole_range(inode, path,
3835 last_dest_end, destoff + len - 1,
3840 ret = clone_finish_inode_update(trans, inode, destoff + len,
3841 destoff, olen, no_time_update);
3845 btrfs_free_path(path);
3850 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3851 u64 off, u64 olen, u64 destoff)
3853 struct inode *inode = file_inode(file);
3854 struct inode *src = file_inode(file_src);
3855 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3858 u64 bs = fs_info->sb->s_blocksize;
3862 * - split compressed inline extents. annoying: we need to
3863 * decompress into destination's address_space (the file offset
3864 * may change, so source mapping won't do), then recompress (or
3865 * otherwise reinsert) a subrange.
3867 * - split destination inode's inline extents. The inline extents can
3868 * be either compressed or non-compressed.
3872 * VFS's generic_remap_file_range_prep() protects us from cloning the
3873 * eof block into the middle of a file, which would result in corruption
3874 * if the file size is not blocksize aligned. So we don't need to check
3875 * for that case here.
3877 if (off + len == src->i_size)
3878 len = ALIGN(src->i_size, bs) - off;
3880 if (destoff > inode->i_size) {
3881 const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
3883 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3887 * We may have truncated the last block if the inode's size is
3888 * not sector size aligned, so we need to wait for writeback to
3889 * complete before proceeding further, otherwise we can race
3890 * with cloning and attempt to increment a reference to an
3891 * extent that no longer exists (writeback completed right after
3892 * we found the previous extent covering eof and before we
3893 * attempted to increment its reference count).
3895 ret = btrfs_wait_ordered_range(inode, wb_start,
3896 destoff - wb_start);
3902 * Lock destination range to serialize with concurrent readpages() and
3903 * source range to serialize with relocation.
3905 btrfs_double_extent_lock(src, off, inode, destoff, len);
3906 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3907 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3909 * Truncate page cache pages so that future reads will see the cloned
3910 * data immediately and not the previous data.
3912 truncate_inode_pages_range(&inode->i_data,
3913 round_down(destoff, PAGE_SIZE),
3914 round_up(destoff + len, PAGE_SIZE) - 1);
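/*
 * A userspace sketch of what ends up here (hedged; only the generic
 * <linux/fs.h> UAPI is assumed): FICLONERANGE on a btrfs file descriptor
 * is routed through btrfs_remap_file_range() into btrfs_clone_files():
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,	// 0 means "clone to EOF"
 *		.dest_offset = 0,
 *	};
 *
 *	if (ioctl(dst_fd, FICLONERANGE, &fcr) < 0)
 *		perror("FICLONERANGE");
 */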
3919 static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
3920 struct file *file_out, loff_t pos_out,
3921 loff_t *len, unsigned int remap_flags)
3923 struct inode *inode_in = file_inode(file_in);
3924 struct inode *inode_out = file_inode(file_out);
3925 u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
3926 bool same_inode = inode_out == inode_in;
3930 if (!(remap_flags & REMAP_FILE_DEDUP)) {
3931 struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
3933 if (btrfs_root_readonly(root_out))
3936 if (file_in->f_path.mnt != file_out->f_path.mnt ||
3937 inode_in->i_sb != inode_out->i_sb)
3941 /* don't make the dst file partly checksummed */
3942 if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
3943 (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
3948 * Now that the inodes are locked, we need to start writeback ourselves
3949 * and can not rely on the writeback from the VFS's generic helper
3950 * generic_remap_file_range_prep() because:
3952 * 1) For compression we must call filemap_fdatawrite_range()
3953 * twice (btrfs_fdatawrite_range() does it for us), and the generic
3954 * helper only calls it once;
3956 * 2) filemap_fdatawrite_range(), called by the generic helper only
3957 * waits for the writeback to complete, i.e. for IO to be done, and
3958 * not for the ordered extents to complete. We need to wait for them
3959 * to complete so that new file extent items are in the fs tree.
3961 if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
3962 wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
3964 wb_len = ALIGN(*len, bs);
3967 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
3968 * any in progress could create its ordered extents after we wait for
3969 * existing ordered extents below).
3971 inode_dio_wait(inode_in);
3973 inode_dio_wait(inode_out);
3976 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
3978 * Btrfs' back references do not have a block level granularity, they
3979 * work at the whole extent level.
3980 * A NOCOW buffered write without data space reserved may not be able to
3981 * fall back to CoW due to lack of data space, and thus could cause data loss.
3984 * Here we take a shortcut by flushing the whole inode, so that all
3985 * nocow write should reach disk as nocow before we increase the
3986 * reference of the extent. We could do better by only flushing NOCOW
3987 * data, but that needs extra accounting.
3989 * Also we don't need to check ASYNC_EXTENT, as async extent will be
3990 * CoWed anyway, not affecting nocow part.
3992 ret = filemap_flush(inode_in->i_mapping);
3996 ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
4000 ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
4005 return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
4009 loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
4010 struct file *dst_file, loff_t destoff, loff_t len,
4011 unsigned int remap_flags)
4013 struct inode *src_inode = file_inode(src_file);
4014 struct inode *dst_inode = file_inode(dst_file);
4015 bool same_inode = dst_inode == src_inode;
4018 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
4022 inode_lock(src_inode);
4024 lock_two_nondirectories(src_inode, dst_inode);
4026 ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
4028 if (ret < 0 || len == 0)
4031 if (remap_flags & REMAP_FILE_DEDUP)
4032 ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
4034 ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
4038 inode_unlock(src_inode);
4040 unlock_two_nondirectories(src_inode, dst_inode);
4042 return ret < 0 ? ret : len;
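/*
 * A userspace sketch of the REMAP_FILE_DEDUP path above (assumes only the
 * <linux/fs.h> UAPI; single destination, error handling trimmed):
 *
 *	struct file_dedupe_range *dd;
 *
 *	dd = calloc(1, sizeof(*dd) + sizeof(struct file_dedupe_range_info));
 *	dd->src_offset = 0;
 *	dd->src_length = len;
 *	dd->dest_count = 1;
 *	dd->info[0].dest_fd = dst_fd;
 *	dd->info[0].dest_offset = 0;
 *
 *	if (ioctl(src_fd, FIDEDUPERANGE, dd) == 0 &&
 *	    dd->info[0].status == FILE_DEDUPE_RANGE_SAME)
 *		printf("deduped %llu bytes\n", dd->info[0].bytes_deduped);
 */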
4045 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4047 struct inode *inode = file_inode(file);
4048 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4049 struct btrfs_root *root = BTRFS_I(inode)->root;
4050 struct btrfs_root *new_root;
4051 struct btrfs_dir_item *di;
4052 struct btrfs_trans_handle *trans;
4053 struct btrfs_path *path;
4054 struct btrfs_key location;
4055 struct btrfs_disk_key disk_key;
4060 if (!capable(CAP_SYS_ADMIN))
4063 ret = mnt_want_write_file(file);
4067 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4073 objectid = BTRFS_FS_TREE_OBJECTID;
4075 location.objectid = objectid;
4076 location.type = BTRFS_ROOT_ITEM_KEY;
4077 location.offset = (u64)-1;
4079 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4080 if (IS_ERR(new_root)) {
4081 ret = PTR_ERR(new_root);
4084 if (!is_fstree(new_root->root_key.objectid)) {
4089 path = btrfs_alloc_path();
4094 path->leave_spinning = 1;
4096 trans = btrfs_start_transaction(root, 1);
4097 if (IS_ERR(trans)) {
4098 btrfs_free_path(path);
4099 ret = PTR_ERR(trans);
4103 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4104 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4105 dir_id, "default", 7, 1);
4106 if (IS_ERR_OR_NULL(di)) {
4107 btrfs_free_path(path);
4108 btrfs_end_transaction(trans);
4110 "Umm, you don't have the default diritem, this isn't going to work");
4115 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4116 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4117 btrfs_mark_buffer_dirty(path->nodes[0]);
4118 btrfs_free_path(path);
4120 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4121 btrfs_end_transaction(trans);
4123 mnt_drop_write_file(file);
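/*
 * A userspace sketch of the ioctl above: the argument is just the
 * subvolume id as a __u64, and 0 restores the toplevel FS_TREE as the
 * default (257 below is a hypothetical subvolume id):
 *
 *	__u64 subvolid = 257;
 *
 *	if (ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &subvolid) < 0)
 *		perror("BTRFS_IOC_DEFAULT_SUBVOL");
 */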
4127 static void get_block_group_info(struct list_head *groups_list,
4128 struct btrfs_ioctl_space_info *space)
4130 struct btrfs_block_group_cache *block_group;
4132 space->total_bytes = 0;
4133 space->used_bytes = 0;
4135 list_for_each_entry(block_group, groups_list, list) {
4136 space->flags = block_group->flags;
4137 space->total_bytes += block_group->key.offset;
4138 space->used_bytes +=
4139 btrfs_block_group_used(&block_group->item);
4143 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4146 struct btrfs_ioctl_space_args space_args;
4147 struct btrfs_ioctl_space_info space;
4148 struct btrfs_ioctl_space_info *dest;
4149 struct btrfs_ioctl_space_info *dest_orig;
4150 struct btrfs_ioctl_space_info __user *user_dest;
4151 struct btrfs_space_info *info;
4152 static const u64 types[] = {
4153 BTRFS_BLOCK_GROUP_DATA,
4154 BTRFS_BLOCK_GROUP_SYSTEM,
4155 BTRFS_BLOCK_GROUP_METADATA,
4156 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
4164 if (copy_from_user(&space_args,
4165 (struct btrfs_ioctl_space_args __user *)arg,
4166 sizeof(space_args)))
4169 for (i = 0; i < num_types; i++) {
4170 struct btrfs_space_info *tmp;
4174 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4176 if (tmp->flags == types[i]) {
4186 down_read(&info->groups_sem);
4187 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4188 if (!list_empty(&info->block_groups[c]))
4191 up_read(&info->groups_sem);
4195 * Global block reserve, exported as a space_info
4199 /* space_slots == 0 means they are asking for a count */
4200 if (space_args.space_slots == 0) {
4201 space_args.total_spaces = slot_count;
4205 slot_count = min_t(u64, space_args.space_slots, slot_count);
4207 alloc_size = sizeof(*dest) * slot_count;
4209 /* we generally have at most 6 or so space infos, one for each raid
4210 * level. So, a whole page should be more than enough for everyone
4212 if (alloc_size > PAGE_SIZE)
4215 space_args.total_spaces = 0;
4216 dest = kmalloc(alloc_size, GFP_KERNEL);
4221 /* now we have a buffer to copy into */
4222 for (i = 0; i < num_types; i++) {
4223 struct btrfs_space_info *tmp;
4230 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4232 if (tmp->flags == types[i]) {
4241 down_read(&info->groups_sem);
4242 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4243 if (!list_empty(&info->block_groups[c])) {
4244 get_block_group_info(&info->block_groups[c],
4246 memcpy(dest, &space, sizeof(space));
4248 space_args.total_spaces++;
4254 up_read(&info->groups_sem);
4258 * Add global block reserve
4261 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4263 spin_lock(&block_rsv->lock);
4264 space.total_bytes = block_rsv->size;
4265 space.used_bytes = block_rsv->size - block_rsv->reserved;
4266 spin_unlock(&block_rsv->lock);
4267 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4268 memcpy(dest, &space, sizeof(space));
4269 space_args.total_spaces++;
4272 user_dest = (struct btrfs_ioctl_space_info __user *)
4273 (arg + sizeof(struct btrfs_ioctl_space_args));
4275 if (copy_to_user(user_dest, dest_orig, alloc_size))
4280 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
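/*
 * A userspace sketch of the two-call protocol above (UAPI from
 * <linux/btrfs.h> assumed): probe with space_slots == 0 to learn the
 * slot count, then allocate the trailing array and fetch for real:
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	struct btrfs_ioctl_space_args *sargs;
 *
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);
 *	sargs = calloc(1, sizeof(*sargs) + probe.total_spaces *
 *		       sizeof(struct btrfs_ioctl_space_info));
 *	sargs->space_slots = probe.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, sargs);
 *	// sargs->spaces[0 .. sargs->total_spaces - 1] now hold
 *	// flags/total_bytes/used_bytes per space info
 */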
4286 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4289 struct btrfs_trans_handle *trans;
4293 trans = btrfs_attach_transaction_barrier(root);
4294 if (IS_ERR(trans)) {
4295 if (PTR_ERR(trans) != -ENOENT)
4296 return PTR_ERR(trans);
4298 /* No running transaction, don't bother */
4299 transid = root->fs_info->last_trans_committed;
4302 transid = trans->transid;
4303 ret = btrfs_commit_transaction_async(trans, 0);
4305 btrfs_end_transaction(trans);
4310 if (copy_to_user(argp, &transid, sizeof(transid)))
4315 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4321 if (copy_from_user(&transid, argp, sizeof(transid)))
4324 transid = 0; /* current trans */
4326 return btrfs_wait_for_commit(fs_info, transid);
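/*
 * The two ioctls above pair up into an asynchronous "commit and wait"
 * from userspace (sketch, <linux/btrfs.h> UAPI assumed):
 *
 *	__u64 transid;
 *
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);	// kick off the commit
 *	// ... overlap other work here ...
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);	// block until durable
 */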
4329 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4331 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4332 struct btrfs_ioctl_scrub_args *sa;
4335 if (!capable(CAP_SYS_ADMIN))
4338 sa = memdup_user(arg, sizeof(*sa));
4342 if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
4347 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4348 ret = mnt_want_write_file(file);
4353 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4354 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4358 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
4359 * error. This is important as it allows user space to know how much
4360 * progress scrub has done. For example, if scrub is canceled we get
4361 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
4362 * space. Later user space can inspect the progress from the structure
4363 * btrfs_ioctl_scrub_args and resume scrub from where it left off
4364 * previously (btrfs-progs does this).
4365 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
4366 * then return -EFAULT to signal the structure was not copied or it may
4367 * be corrupt and unreliable due to a partial copy.
4369 if (copy_to_user(arg, sa, sizeof(*sa)))
4372 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4373 mnt_drop_write_file(file);
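/*
 * A hedged userspace sketch of the resume behaviour described above
 * (this mirrors what btrfs-progs does; only the <linux/btrfs.h> UAPI is
 * assumed): because the args are copied back even on error, a canceled
 * scrub can continue from the last recorded position:
 *
 *	struct btrfs_ioctl_scrub_args sa = {
 *		.devid = devid,
 *		.end = (__u64)-1,	// whole device
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0 && errno == ECANCELED) {
 *		sa.start = sa.progress.last_physical;	// resume point
 *		ioctl(fd, BTRFS_IOC_SCRUB, &sa);
 *	}
 */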
4379 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4381 if (!capable(CAP_SYS_ADMIN))
4384 return btrfs_scrub_cancel(fs_info);
4387 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4390 struct btrfs_ioctl_scrub_args *sa;
4393 if (!capable(CAP_SYS_ADMIN))
4396 sa = memdup_user(arg, sizeof(*sa));
4400 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4402 if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
4409 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4412 struct btrfs_ioctl_get_dev_stats *sa;
4415 sa = memdup_user(arg, sizeof(*sa));
4419 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4424 ret = btrfs_get_dev_stats(fs_info, sa);
4426 if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
4433 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4436 struct btrfs_ioctl_dev_replace_args *p;
4439 if (!capable(CAP_SYS_ADMIN))
4442 p = memdup_user(arg, sizeof(*p));
4447 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4448 if (sb_rdonly(fs_info->sb)) {
4452 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4453 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4455 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4456 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4459 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4460 btrfs_dev_replace_status(fs_info, p);
4463 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4464 p->result = btrfs_dev_replace_cancel(fs_info);
4472 if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
4479 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4485 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4486 struct inode_fs_paths *ipath = NULL;
4487 struct btrfs_path *path;
4489 if (!capable(CAP_DAC_READ_SEARCH))
4492 path = btrfs_alloc_path();
4498 ipa = memdup_user(arg, sizeof(*ipa));
4505 size = min_t(u32, ipa->size, 4096);
4506 ipath = init_ipath(size, root, path);
4507 if (IS_ERR(ipath)) {
4508 ret = PTR_ERR(ipath);
4513 ret = paths_from_inode(ipa->inum, ipath);
4517 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4518 rel_ptr = ipath->fspath->val[i] -
4519 (u64)(unsigned long)ipath->fspath->val;
4520 ipath->fspath->val[i] = rel_ptr;
4523 btrfs_free_path(path);
4525 ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
4526 ipath->fspath, size);
4533 btrfs_free_path(path);
4540 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4542 struct btrfs_data_container *inodes = ctx;
4543 const size_t c = 3 * sizeof(u64);
4545 if (inodes->bytes_left >= c) {
4546 inodes->bytes_left -= c;
4547 inodes->val[inodes->elem_cnt] = inum;
4548 inodes->val[inodes->elem_cnt + 1] = offset;
4549 inodes->val[inodes->elem_cnt + 2] = root;
4550 inodes->elem_cnt += 3;
4552 inodes->bytes_missing += c - inodes->bytes_left;
4553 inodes->bytes_left = 0;
4554 inodes->elem_missed += 3;
4560 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4561 void __user *arg, int version)
4565 struct btrfs_ioctl_logical_ino_args *loi;
4566 struct btrfs_data_container *inodes = NULL;
4567 struct btrfs_path *path = NULL;
4570 if (!capable(CAP_SYS_ADMIN))
4573 loi = memdup_user(arg, sizeof(*loi));
4575 return PTR_ERR(loi);
4578 ignore_offset = false;
4579 size = min_t(u32, loi->size, SZ_64K);
4581 /* All reserved bits must be 0 for now */
4582 if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4586 /* Only accept flags we have defined so far */
4587 if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4591 ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4592 size = min_t(u32, loi->size, SZ_16M);
4595 inodes = init_data_container(size);
4596 if (IS_ERR(inodes)) {
4597 ret = PTR_ERR(inodes);
4601 path = btrfs_alloc_path();
4606 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4607 build_ino_list, inodes, ignore_offset);
4608 btrfs_free_path(path);
4614 ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
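/*
 * A userspace sketch of consuming the result (UAPI from <linux/btrfs.h>
 * assumed; byte_on_disk is a hypothetical logical address): the buffer is
 * a btrfs_data_container whose val[] holds the (inum, offset, root)
 * triplets packed by build_ino_list() above:
 *
 *	struct btrfs_data_container *container = calloc(1, 65536);
 *	struct btrfs_ioctl_logical_ino_args loi = {
 *		.logical = byte_on_disk,
 *		.size = 65536,
 *		.inodes = (__u64)(uintptr_t)container,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_LOGICAL_INO, &loi) == 0)
 *		for (int i = 0; i < container->elem_cnt; i += 3)
 *			printf("ino %llu off %llu root %llu\n",
 *			       container->val[i], container->val[i + 1],
 *			       container->val[i + 2]);
 */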
4627 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4628 struct btrfs_ioctl_balance_args *bargs)
4630 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4632 bargs->flags = bctl->flags;
4634 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4635 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4636 if (atomic_read(&fs_info->balance_pause_req))
4637 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4638 if (atomic_read(&fs_info->balance_cancel_req))
4639 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4641 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4642 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4643 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4645 spin_lock(&fs_info->balance_lock);
4646 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4647 spin_unlock(&fs_info->balance_lock);
4650 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4652 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4653 struct btrfs_fs_info *fs_info = root->fs_info;
4654 struct btrfs_ioctl_balance_args *bargs;
4655 struct btrfs_balance_control *bctl;
4656 bool need_unlock; /* for mut. excl. ops lock */
4659 if (!capable(CAP_SYS_ADMIN))
4662 ret = mnt_want_write_file(file);
4667 if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4668 mutex_lock(&fs_info->balance_mutex);
4674 * mut. excl. ops lock is locked. Three possibilities:
4675 * (1) some other op is running
4676 * (2) balance is running
4677 * (3) balance is paused -- special case (think resume)
4679 mutex_lock(&fs_info->balance_mutex);
4680 if (fs_info->balance_ctl) {
4681 /* this is either (2) or (3) */
4682 if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4683 mutex_unlock(&fs_info->balance_mutex);
4685 * The lock was released to allow other waiters to continue;
4686 * we'll re-examine the status.
4688 mutex_lock(&fs_info->balance_mutex);
4690 if (fs_info->balance_ctl &&
4691 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4693 need_unlock = false;
4697 mutex_unlock(&fs_info->balance_mutex);
4701 mutex_unlock(&fs_info->balance_mutex);
4707 mutex_unlock(&fs_info->balance_mutex);
4708 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4713 BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
4716 bargs = memdup_user(arg, sizeof(*bargs));
4717 if (IS_ERR(bargs)) {
4718 ret = PTR_ERR(bargs);
4722 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4723 if (!fs_info->balance_ctl) {
4728 bctl = fs_info->balance_ctl;
4729 spin_lock(&fs_info->balance_lock);
4730 bctl->flags |= BTRFS_BALANCE_RESUME;
4731 spin_unlock(&fs_info->balance_lock);
4739 if (fs_info->balance_ctl) {
4744 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4751 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4752 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4753 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4755 bctl->flags = bargs->flags;
4757 /* balance everything - no filters */
4758 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4761 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4768 * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to
4769 * btrfs_balance. bctl is freed in reset_balance_state, or, if
4770 * restriper was paused all the way until unmount, in free_fs_info.
4771 * The flag should be cleared after reset_balance_state.
4773 need_unlock = false;
4775 ret = btrfs_balance(fs_info, bctl, bargs);
4778 if ((ret == 0 || ret == -ECANCELED) && arg) {
4779 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4788 mutex_unlock(&fs_info->balance_mutex);
4790 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4792 mnt_drop_write_file(file);
4796 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4798 if (!capable(CAP_SYS_ADMIN))
4802 case BTRFS_BALANCE_CTL_PAUSE:
4803 return btrfs_pause_balance(fs_info);
4804 case BTRFS_BALANCE_CTL_CANCEL:
4805 return btrfs_cancel_balance(fs_info);
4811 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4814 struct btrfs_ioctl_balance_args *bargs;
4817 if (!capable(CAP_SYS_ADMIN))
4820 mutex_lock(&fs_info->balance_mutex);
4821 if (!fs_info->balance_ctl) {
4826 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4832 btrfs_update_ioctl_balance_args(fs_info, bargs);
4834 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4839 mutex_unlock(&fs_info->balance_mutex);
4843 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4845 struct inode *inode = file_inode(file);
4846 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4847 struct btrfs_ioctl_quota_ctl_args *sa;
4850 if (!capable(CAP_SYS_ADMIN))
4853 ret = mnt_want_write_file(file);
4857 sa = memdup_user(arg, sizeof(*sa));
4863 down_write(&fs_info->subvol_sem);
4866 case BTRFS_QUOTA_CTL_ENABLE:
4867 ret = btrfs_quota_enable(fs_info);
4869 case BTRFS_QUOTA_CTL_DISABLE:
4870 ret = btrfs_quota_disable(fs_info);
4878 up_write(&fs_info->subvol_sem);
4880 mnt_drop_write_file(file);
4884 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4886 struct inode *inode = file_inode(file);
4887 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4888 struct btrfs_root *root = BTRFS_I(inode)->root;
4889 struct btrfs_ioctl_qgroup_assign_args *sa;
4890 struct btrfs_trans_handle *trans;
4894 if (!capable(CAP_SYS_ADMIN))
4897 ret = mnt_want_write_file(file);
4901 sa = memdup_user(arg, sizeof(*sa));
4907 trans = btrfs_join_transaction(root);
4908 if (IS_ERR(trans)) {
4909 ret = PTR_ERR(trans);
4914 ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
4916 ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
4919 /* update qgroup status and info */
4920 mutex_lock(&fs_info->qgroup_ioctl_lock);
4921 err = btrfs_run_qgroups(trans);
4922 mutex_unlock(&fs_info->qgroup_ioctl_lock);
4924 btrfs_handle_fs_error(fs_info, err,
4925 "failed to update qgroup status and info");
4926 err = btrfs_end_transaction(trans);
4933 mnt_drop_write_file(file);
4937 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4939 struct inode *inode = file_inode(file);
4940 struct btrfs_root *root = BTRFS_I(inode)->root;
4941 struct btrfs_ioctl_qgroup_create_args *sa;
4942 struct btrfs_trans_handle *trans;
4946 if (!capable(CAP_SYS_ADMIN))
4949 ret = mnt_want_write_file(file);
4953 sa = memdup_user(arg, sizeof(*sa));
4959 if (!sa->qgroupid) {
4964 trans = btrfs_join_transaction(root);
4965 if (IS_ERR(trans)) {
4966 ret = PTR_ERR(trans);
4971 ret = btrfs_create_qgroup(trans, sa->qgroupid);
4973 ret = btrfs_remove_qgroup(trans, sa->qgroupid);
4976 err = btrfs_end_transaction(trans);
4983 mnt_drop_write_file(file);
4987 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
4989 struct inode *inode = file_inode(file);
4990 struct btrfs_root *root = BTRFS_I(inode)->root;
4991 struct btrfs_ioctl_qgroup_limit_args *sa;
4992 struct btrfs_trans_handle *trans;
4997 if (!capable(CAP_SYS_ADMIN))
5000 ret = mnt_want_write_file(file);
5004 sa = memdup_user(arg, sizeof(*sa));
5010 trans = btrfs_join_transaction(root);
5011 if (IS_ERR(trans)) {
5012 ret = PTR_ERR(trans);
5016 qgroupid = sa->qgroupid;
5018 /* take the current subvol as qgroup */
5019 qgroupid = root->root_key.objectid;
5022 ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
5024 err = btrfs_end_transaction(trans);
5031 mnt_drop_write_file(file);
5035 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5037 struct inode *inode = file_inode(file);
5038 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5039 struct btrfs_ioctl_quota_rescan_args *qsa;
5042 if (!capable(CAP_SYS_ADMIN))
5045 ret = mnt_want_write_file(file);
5049 qsa = memdup_user(arg, sizeof(*qsa));
5060 ret = btrfs_qgroup_rescan(fs_info);
5065 mnt_drop_write_file(file);
5069 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5071 struct inode *inode = file_inode(file);
5072 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5073 struct btrfs_ioctl_quota_rescan_args *qsa;
5076 if (!capable(CAP_SYS_ADMIN))
5079 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5083 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5085 qsa->progress = fs_info->qgroup_rescan_progress.objectid;
5088 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5095 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5097 struct inode *inode = file_inode(file);
5098 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5100 if (!capable(CAP_SYS_ADMIN))
5103 return btrfs_qgroup_wait_for_completion(fs_info, true);
5106 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5107 struct btrfs_ioctl_received_subvol_args *sa)
5109 struct inode *inode = file_inode(file);
5110 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5111 struct btrfs_root *root = BTRFS_I(inode)->root;
5112 struct btrfs_root_item *root_item = &root->root_item;
5113 struct btrfs_trans_handle *trans;
5114 struct timespec64 ct = current_time(inode);
5116 int received_uuid_changed;
5118 if (!inode_owner_or_capable(inode))
5121 ret = mnt_want_write_file(file);
5125 down_write(&fs_info->subvol_sem);
5127 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
5132 if (btrfs_root_readonly(root)) {
5139 * 2 - uuid items (received uuid + subvol uuid)
5141 trans = btrfs_start_transaction(root, 3);
5142 if (IS_ERR(trans)) {
5143 ret = PTR_ERR(trans);
5148 sa->rtransid = trans->transid;
5149 sa->rtime.sec = ct.tv_sec;
5150 sa->rtime.nsec = ct.tv_nsec;
5152 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5154 if (received_uuid_changed &&
5155 !btrfs_is_empty_uuid(root_item->received_uuid)) {
5156 ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
5157 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5158 root->root_key.objectid);
5159 if (ret && ret != -ENOENT) {
5160 btrfs_abort_transaction(trans, ret);
5161 btrfs_end_transaction(trans);
5165 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5166 btrfs_set_root_stransid(root_item, sa->stransid);
5167 btrfs_set_root_rtransid(root_item, sa->rtransid);
5168 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5169 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5170 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5171 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5173 ret = btrfs_update_root(trans, fs_info->tree_root,
5174 &root->root_key, &root->root_item);
5176 btrfs_end_transaction(trans);
5179 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5180 ret = btrfs_uuid_tree_add(trans, sa->uuid,
5181 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5182 root->root_key.objectid);
5183 if (ret < 0 && ret != -EEXIST) {
5184 btrfs_abort_transaction(trans, ret);
5185 btrfs_end_transaction(trans);
5189 ret = btrfs_commit_transaction(trans);
5191 up_write(&fs_info->subvol_sem);
5192 mnt_drop_write_file(file);
5197 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5200 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5201 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5204 args32 = memdup_user(arg, sizeof(*args32));
5206 return PTR_ERR(args32);
5208 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
5214 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5215 args64->stransid = args32->stransid;
5216 args64->rtransid = args32->rtransid;
5217 args64->stime.sec = args32->stime.sec;
5218 args64->stime.nsec = args32->stime.nsec;
5219 args64->rtime.sec = args32->rtime.sec;
5220 args64->rtime.nsec = args32->rtime.nsec;
5221 args64->flags = args32->flags;
5223 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5227 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5228 args32->stransid = args64->stransid;
5229 args32->rtransid = args64->rtransid;
5230 args32->stime.sec = args64->stime.sec;
5231 args32->stime.nsec = args64->stime.nsec;
5232 args32->rtime.sec = args64->rtime.sec;
5233 args32->rtime.nsec = args64->rtime.nsec;
5234 args32->flags = args64->flags;
5236 ret = copy_to_user(arg, args32, sizeof(*args32));
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, sa);
	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);
	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}
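
/*
 * Illustrative userspace counterpart (an assumption, not kernel code):
 * only the label bytes themselves are copied out, with no trailing NUL,
 * so callers should zero-initialize their buffer:
 *
 *	char label[BTRFS_LABEL_SIZE] = {0};
 *
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
 *		printf("label: %s\n", label);
 */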
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}
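
/*
 * Matching userspace sketch (illustrative): the label must be shorter
 * than BTRFS_LABEL_SIZE including its NUL terminator, otherwise the
 * strnlen() check above rejects it with -EINVAL:
 *
 *	char label[BTRFS_LABEL_SIZE] = "backup-pool";
 *
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, label) < 0)
 *		perror("FS_IOC_SETFSLABEL");
 */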
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
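
/*
 * A hedged sketch of the calling convention from userspace: the ioctl
 * fills one struct per mask, in the same order as the features[] array
 * above (supported, safe to set online, safe to clear online):
 *
 *	struct btrfs_ioctl_feature_flags feats[3];
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_SUPPORTED_FEATURES, feats) == 0)
 *		printf("supported incompat: 0x%llx\n",
 *		       (unsigned long long)feats[0].incompat_flags);
 */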
static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_name(set);
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
			   "this kernel does not support the %s feature bit%s",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
			   "this kernel does not support %s bits 0x%llx",
			   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
			   "can't set the %s feature bit%s while mounted",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
			   "can't set %s bits 0x%llx while mounted",
			   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
			   "can't clear the %s feature bit%s while mounted",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
			   "can't clear %s bits 0x%llx while mounted",
			   type, disallowed);
		return -EPERM;
	}

	return 0;
}
#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
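
/*
 * For example, check_feature(fs_info, mask, flags, COMPAT) expands to:
 *
 *	check_feature_bits(fs_info, FEAT_COMPAT, mask, flags,
 *			   BTRFS_FEATURE_COMPAT_SUPP,
 *			   BTRFS_FEATURE_COMPAT_SAFE_SET,
 *			   BTRFS_FEATURE_COMPAT_SAFE_CLEAR)
 */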
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);

out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}
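
/*
 * Illustrative caller (an assumption): as the update logic above shows,
 * flags[0] selects which bits to change and flags[1] supplies their new
 * values, so setting a single incompat bit looks like this, with `bit'
 * a stand-in for a real BTRFS_FEATURE_INCOMPAT_* constant:
 *
 *	struct btrfs_ioctl_feature_flags pair[2] = {};
 *
 *	pair[0].incompat_flags = bit;	// mask: which bits to change
 *	pair[1].incompat_flags = bit;	// value: 1 = set, 0 = clear
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, pair);
 */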
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}

	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}
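
/*
 * A minimal userspace sketch (illustrative only): the serialized send
 * stream is written to args.send_fd, typically a pipe or an output file,
 * while the ioctl is issued on an fd of the snapshot being sent:
 *
 *	struct btrfs_ioctl_send_args args = {};
 *
 *	args.send_fd = out_fd;
 *	if (ioctl(subvol_fd, BTRFS_IOC_SEND, &args) < 0)
 *		perror("BTRFS_IOC_SEND");
 */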
long btrfs_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FS_IOC_GETFSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case FS_IOC_FSGETXATTR:
		return btrfs_ioctl_fsgetxattr(file, argp);
	case FS_IOC_FSSETXATTR:
		return btrfs_ioctl_fssetxattr(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif