// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */
6 #include <linux/kernel.h>
8 #include <linux/file.h>
10 #include <linux/fsnotify.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/mount.h>
17 #include <linux/namei.h>
18 #include <linux/writeback.h>
19 #include <linux/compat.h>
20 #include <linux/security.h>
21 #include <linux/xattr.h>
23 #include <linux/slab.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include <linux/btrfs.h>
27 #include <linux/uaccess.h>
28 #include <linux/iversion.h>
31 #include "transaction.h"
32 #include "btrfs_inode.h"
33 #include "print-tree.h"
36 #include "inode-map.h"
38 #include "rcu-string.h"
40 #include "dev-replace.h"
45 #include "compression.h"
48 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
49 * structures are incorrect, as the timespec structure from userspace
50 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));
58 struct btrfs_ioctl_received_subvol_args_32 {
59 char uuid[BTRFS_UUID_SIZE]; /* in */
60 __u64 stransid; /* in */
61 __u64 rtransid; /* out */
62 struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64 flags;			/* in */
	__u64 reserved[16];		/* in */
66 } __attribute__ ((__packed__));
68 #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
69 struct btrfs_ioctl_received_subvol_args_32)
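/*
 * Worked size example (added for illustration, not from the original file):
 * on a 64-bit kernel the native struct btrfs_ioctl_timespec is padded to
 * 16 bytes, while 32-bit userspace lays it out in 12, so a structure that
 * embeds two of them (stime and rtime above) comes out 8 bytes smaller in
 * the compat ABI.  A hypothetical compile-time check of that assumption:
 *
 *	BUILD_BUG_ON(sizeof(struct btrfs_ioctl_timespec_32) != 12);
 *	BUILD_BUG_ON(sizeof(struct btrfs_ioctl_received_subvol_args_32) !=
 *		     sizeof(struct btrfs_ioctl_received_subvol_args) - 8);
 */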
72 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
73 struct btrfs_ioctl_send_args_32 {
74 __s64 send_fd; /* in */
75 __u64 clone_sources_count; /* in */
76 compat_uptr_t clone_sources; /* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
80 } __attribute__ ((__packed__));
#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
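#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/*
 * Illustrative sketch, not part of the original file: a compat caller's
 * 32-bit send arguments are typically widened into the native
 * struct btrfs_ioctl_send_args before the common send path runs, with the
 * user pointer converted through compat_ptr().  The helper name is
 * hypothetical.
 */
static inline void example_widen_send_args_32(
				const struct btrfs_ioctl_send_args_32 *in,
				struct btrfs_ioctl_send_args *out)
{
	out->send_fd = in->send_fd;
	out->clone_sources_count = in->clone_sources_count;
	out->clone_sources = compat_ptr(in->clone_sources);
	out->parent_root = in->parent_root;
	out->flags = in->flags;
	memcpy(out->reserved, in->reserved, sizeof(out->reserved));
}
#endif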
86 static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
90 /* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
{
	if (S_ISDIR(inode->i_mode))
		return flags;
	else if (S_ISREG(inode->i_mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
106 static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
108 unsigned int iflags = 0;
110 if (flags & BTRFS_INODE_SYNC)
111 iflags |= FS_SYNC_FL;
112 if (flags & BTRFS_INODE_IMMUTABLE)
113 iflags |= FS_IMMUTABLE_FL;
114 if (flags & BTRFS_INODE_APPEND)
115 iflags |= FS_APPEND_FL;
116 if (flags & BTRFS_INODE_NODUMP)
117 iflags |= FS_NODUMP_FL;
118 if (flags & BTRFS_INODE_NOATIME)
119 iflags |= FS_NOATIME_FL;
120 if (flags & BTRFS_INODE_DIRSYNC)
121 iflags |= FS_DIRSYNC_FL;
122 if (flags & BTRFS_INODE_NODATACOW)
123 iflags |= FS_NOCOW_FL;
125 if (flags & BTRFS_INODE_NOCOMPRESS)
126 iflags |= FS_NOCOMP_FL;
127 else if (flags & BTRFS_INODE_COMPRESS)
128 iflags |= FS_COMPR_FL;
134 * Update inode->i_flags based on the btrfs internal flags.
136 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
138 struct btrfs_inode *binode = BTRFS_I(inode);
139 unsigned int new_fl = 0;
	if (binode->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (binode->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (binode->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (binode->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}
157 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
159 struct btrfs_inode *binode = BTRFS_I(file_inode(file));
160 unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
167 /* Check if @flags are a supported and valid set of FS_*_FL flags */
168 static int check_fsflags(unsigned int flags)
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
		      FS_NOATIME_FL | FS_NODUMP_FL |
		      FS_SYNC_FL | FS_DIRSYNC_FL |
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;
	return 0;
}
183 static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
185 struct inode *inode = file_inode(file);
186 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
187 struct btrfs_inode *binode = BTRFS_I(inode);
188 struct btrfs_root *root = binode->root;
189 struct btrfs_trans_handle *trans;
190 unsigned int fsflags, old_fsflags;
193 unsigned int old_i_flags;
196 if (!inode_owner_or_capable(inode))
199 if (btrfs_root_readonly(root))
202 if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
205 ret = check_fsflags(fsflags);
209 ret = mnt_want_write_file(file);
215 old_flags = binode->flags;
216 old_i_flags = inode->i_flags;
217 mode = inode->i_mode;
219 fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
220 old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
221 if ((fsflags ^ old_fsflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
222 if (!capable(CAP_LINUX_IMMUTABLE)) {
228 if (fsflags & FS_SYNC_FL)
229 binode->flags |= BTRFS_INODE_SYNC;
231 binode->flags &= ~BTRFS_INODE_SYNC;
232 if (fsflags & FS_IMMUTABLE_FL)
233 binode->flags |= BTRFS_INODE_IMMUTABLE;
235 binode->flags &= ~BTRFS_INODE_IMMUTABLE;
236 if (fsflags & FS_APPEND_FL)
237 binode->flags |= BTRFS_INODE_APPEND;
239 binode->flags &= ~BTRFS_INODE_APPEND;
240 if (fsflags & FS_NODUMP_FL)
241 binode->flags |= BTRFS_INODE_NODUMP;
243 binode->flags &= ~BTRFS_INODE_NODUMP;
244 if (fsflags & FS_NOATIME_FL)
245 binode->flags |= BTRFS_INODE_NOATIME;
247 binode->flags &= ~BTRFS_INODE_NOATIME;
248 if (fsflags & FS_DIRSYNC_FL)
249 binode->flags |= BTRFS_INODE_DIRSYNC;
251 binode->flags &= ~BTRFS_INODE_DIRSYNC;
252 if (fsflags & FS_NOCOW_FL) {
255 * It's safe to turn csums off here, no extents exist.
256 * Otherwise we want the flag to reflect the real COW
257 * status of the file and will not set it.
259 if (inode->i_size == 0)
260 binode->flags |= BTRFS_INODE_NODATACOW
261 | BTRFS_INODE_NODATASUM;
263 binode->flags |= BTRFS_INODE_NODATACOW;
267 * Revert back under same assumptions as above
270 if (inode->i_size == 0)
271 binode->flags &= ~(BTRFS_INODE_NODATACOW
272 | BTRFS_INODE_NODATASUM);
274 binode->flags &= ~BTRFS_INODE_NODATACOW;
279 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
283 if (fsflags & FS_NOCOMP_FL) {
284 binode->flags &= ~BTRFS_INODE_COMPRESS;
285 binode->flags |= BTRFS_INODE_NOCOMPRESS;
287 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
288 if (ret && ret != -ENODATA)
290 } else if (fsflags & FS_COMPR_FL) {
293 binode->flags |= BTRFS_INODE_COMPRESS;
294 binode->flags &= ~BTRFS_INODE_NOCOMPRESS;
296 comp = btrfs_compress_type2str(fs_info->compress_type);
297 if (!comp || comp[0] == 0)
298 comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
300 ret = btrfs_set_prop(inode, "btrfs.compression",
301 comp, strlen(comp), 0);
306 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
307 if (ret && ret != -ENODATA)
309 binode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
312 trans = btrfs_start_transaction(root, 1);
314 ret = PTR_ERR(trans);
318 btrfs_sync_inode_flags_to_i_flags(inode);
319 inode_inc_iversion(inode);
320 inode->i_ctime = current_time(inode);
321 ret = btrfs_update_inode(trans, root, inode);
323 btrfs_end_transaction(trans);
326 binode->flags = old_flags;
327 inode->i_flags = old_i_flags;
332 mnt_drop_write_file(file);
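/*
 * Illustrative userspace sketch (not part of this file): toggling NOCOW via
 * FS_IOC_SETFLAGS.  As the logic above shows, the NODATASUM half of NOCOW is
 * only applied while the file is still empty (i_size == 0), so the flag is
 * normally set right after creating the file.  Includes and error handling
 * are omitted; the path is hypothetical.
 *
 *	int fd = open("/mnt/btrfs/newfile", O_CREAT | O_RDWR, 0600);
 *	unsigned int flags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOCOW_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */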
/*
 * Translate btrfs internal inode flags to xflags as expected by the
 * FS_IOC_FSGETXATTR ioctl. Filter only the supported ones, unknown flags are
 * silently dropped.
 */
341 static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags)
343 unsigned int xflags = 0;
345 if (flags & BTRFS_INODE_APPEND)
346 xflags |= FS_XFLAG_APPEND;
347 if (flags & BTRFS_INODE_IMMUTABLE)
348 xflags |= FS_XFLAG_IMMUTABLE;
349 if (flags & BTRFS_INODE_NOATIME)
350 xflags |= FS_XFLAG_NOATIME;
351 if (flags & BTRFS_INODE_NODUMP)
352 xflags |= FS_XFLAG_NODUMP;
353 if (flags & BTRFS_INODE_SYNC)
354 xflags |= FS_XFLAG_SYNC;
/* Check if @flags are a supported and valid set of FS_XFLAG_* flags */
360 static int check_xflags(unsigned int flags)
362 if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
363 FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
/*
 * Set the xflags from the internal inode flags. The remaining items of fsxattr
 * are zeroed.
 */
372 static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg)
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	struct fsxattr fa;

	memset(&fa, 0, sizeof(fa));
378 fa.fsx_xflags = btrfs_inode_flags_to_xflags(binode->flags);
380 if (copy_to_user(arg, &fa, sizeof(fa)))
386 static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
388 struct inode *inode = file_inode(file);
389 struct btrfs_inode *binode = BTRFS_I(inode);
390 struct btrfs_root *root = binode->root;
391 struct btrfs_trans_handle *trans;
394 unsigned old_i_flags;
397 if (!inode_owner_or_capable(inode))
400 if (btrfs_root_readonly(root))
403 memset(&fa, 0, sizeof(fa));
404 if (copy_from_user(&fa, arg, sizeof(fa)))
407 ret = check_xflags(fa.fsx_xflags);
411 if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0)
414 ret = mnt_want_write_file(file);
420 old_flags = binode->flags;
421 old_i_flags = inode->i_flags;
423 /* We need the capabilities to change append-only or immutable inode */
424 if (((old_flags & (BTRFS_INODE_APPEND | BTRFS_INODE_IMMUTABLE)) ||
425 (fa.fsx_xflags & (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE))) &&
426 !capable(CAP_LINUX_IMMUTABLE)) {
431 if (fa.fsx_xflags & FS_XFLAG_SYNC)
432 binode->flags |= BTRFS_INODE_SYNC;
434 binode->flags &= ~BTRFS_INODE_SYNC;
435 if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE)
436 binode->flags |= BTRFS_INODE_IMMUTABLE;
438 binode->flags &= ~BTRFS_INODE_IMMUTABLE;
439 if (fa.fsx_xflags & FS_XFLAG_APPEND)
440 binode->flags |= BTRFS_INODE_APPEND;
442 binode->flags &= ~BTRFS_INODE_APPEND;
443 if (fa.fsx_xflags & FS_XFLAG_NODUMP)
444 binode->flags |= BTRFS_INODE_NODUMP;
446 binode->flags &= ~BTRFS_INODE_NODUMP;
447 if (fa.fsx_xflags & FS_XFLAG_NOATIME)
448 binode->flags |= BTRFS_INODE_NOATIME;
450 binode->flags &= ~BTRFS_INODE_NOATIME;
452 /* 1 item for the inode */
453 trans = btrfs_start_transaction(root, 1);
455 ret = PTR_ERR(trans);
459 btrfs_sync_inode_flags_to_i_flags(inode);
460 inode_inc_iversion(inode);
461 inode->i_ctime = current_time(inode);
462 ret = btrfs_update_inode(trans, root, inode);
464 btrfs_end_transaction(trans);
468 binode->flags = old_flags;
469 inode->i_flags = old_i_flags;
473 mnt_drop_write_file(file);
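/*
 * Illustrative userspace sketch (not part of this file): setting the
 * append-only xflag through FS_IOC_FSSETXATTR.  Only the xflags accepted by
 * check_xflags() above may be set, fsx_extsize/fsx_projid/fsx_cowextsize must
 * stay zero, and changing APPEND or IMMUTABLE also needs CAP_LINUX_IMMUTABLE.
 *
 *	struct fsxattr fa;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_xflags |= FS_XFLAG_APPEND;
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */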
478 static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
480 struct inode *inode = file_inode(file);
482 return put_user(inode->i_generation, arg);
485 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
487 struct inode *inode = file_inode(file);
488 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
489 struct btrfs_device *device;
490 struct request_queue *q;
491 struct fstrim_range range;
492 u64 minlen = ULLONG_MAX;
496 if (!capable(CAP_SYS_ADMIN))
500 * If the fs is mounted with nologreplay, which requires it to be
501 * mounted in RO mode as well, we can not allow discard on free space
502 * inside block groups, because log trees refer to extents that are not
503 * pinned in a block group's free space cache (pinning the extents is
504 * precisely the first phase of replaying a log tree).
506 if (btrfs_test_opt(fs_info, NOLOGREPLAY))
510 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
514 q = bdev_get_queue(device->bdev);
515 if (blk_queue_discard(q)) {
517 minlen = min_t(u64, q->limits.discard_granularity,
525 if (copy_from_user(&range, arg, sizeof(range)))
529 * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
530 * block group is in the logical address space, which can be any
531 * sectorsize aligned bytenr in the range [0, U64_MAX].
533 if (range.len < fs_info->sb->s_blocksize)
536 range.minlen = max(range.minlen, minlen);
537 ret = btrfs_trim_fs(fs_info, &range);
541 if (copy_to_user(arg, &range, sizeof(range)))
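/*
 * Illustrative userspace sketch (not part of this file): trimming the whole
 * filesystem with FITRIM.  Offsets and lengths are bytes in the logical
 * address space, as the note above explains, so a len of U64_MAX simply means
 * "everything"; on return, range.len holds the number of bytes trimmed.
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.minlen = 0,
 *	};
 *
 *	ioctl(fd, FITRIM, &range);
 */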
547 int btrfs_is_empty_uuid(u8 *uuid)
551 for (i = 0; i < BTRFS_UUID_SIZE; i++) {
558 static noinline int create_subvol(struct inode *dir,
559 struct dentry *dentry,
560 const char *name, int namelen,
562 struct btrfs_qgroup_inherit *inherit)
564 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
565 struct btrfs_trans_handle *trans;
566 struct btrfs_key key;
567 struct btrfs_root_item *root_item;
568 struct btrfs_inode_item *inode_item;
569 struct extent_buffer *leaf;
570 struct btrfs_root *root = BTRFS_I(dir)->root;
571 struct btrfs_root *new_root;
572 struct btrfs_block_rsv block_rsv;
573 struct timespec64 cur_time = current_time(dir);
578 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
582 root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
586 ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
	/*
	 * Don't create a subvolume whose level is not zero, or qgroups will be
	 * screwed up, since they assume a subvolume qgroup's level is 0.
	 */
594 if (btrfs_qgroup_level(objectid)) {
599 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * Reserve metadata the same way as snapshot creation does; see the
	 * comment in create_snapshot().
	 */
604 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
608 trans = btrfs_start_transaction(root, 0);
610 ret = PTR_ERR(trans);
611 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
614 trans->block_rsv = &block_rsv;
615 trans->bytes_reserved = block_rsv.size;
617 ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
621 leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
627 btrfs_mark_buffer_dirty(leaf);
629 inode_item = &root_item->inode;
630 btrfs_set_stack_inode_generation(inode_item, 1);
631 btrfs_set_stack_inode_size(inode_item, 3);
632 btrfs_set_stack_inode_nlink(inode_item, 1);
633 btrfs_set_stack_inode_nbytes(inode_item,
635 btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
637 btrfs_set_root_flags(root_item, 0);
638 btrfs_set_root_limit(root_item, 0);
639 btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
641 btrfs_set_root_bytenr(root_item, leaf->start);
642 btrfs_set_root_generation(root_item, trans->transid);
643 btrfs_set_root_level(root_item, 0);
644 btrfs_set_root_refs(root_item, 1);
645 btrfs_set_root_used(root_item, leaf->len);
646 btrfs_set_root_last_snapshot(root_item, 0);
648 btrfs_set_root_generation_v2(root_item,
649 btrfs_root_generation(root_item));
650 uuid_le_gen(&new_uuid);
651 memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
652 btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
653 btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
654 root_item->ctime = root_item->otime;
655 btrfs_set_root_ctransid(root_item, trans->transid);
656 btrfs_set_root_otransid(root_item, trans->transid);
658 btrfs_tree_unlock(leaf);
660 btrfs_set_root_dirid(root_item, new_dirid);
662 key.objectid = objectid;
664 key.type = BTRFS_ROOT_ITEM_KEY;
665 ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
669 * Since we don't abort the transaction in this case, free the
670 * tree block so that we don't leak space and leave the
671 * filesystem in an inconsistent state (an extent item in the
672 * extent tree without backreferences). Also no need to have
673 * the tree block locked since it is not in any tree at this
674 * point, so no other task can find it and use it.
676 btrfs_free_tree_block(trans, root, leaf, 0, 1);
677 free_extent_buffer(leaf);
681 free_extent_buffer(leaf);
684 key.offset = (u64)-1;
685 new_root = btrfs_read_fs_root_no_name(fs_info, &key);
686 if (IS_ERR(new_root)) {
687 ret = PTR_ERR(new_root);
688 btrfs_abort_transaction(trans, ret);
692 btrfs_record_root_in_trans(trans, new_root);
694 ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
696 /* We potentially lose an unused inode item here */
697 btrfs_abort_transaction(trans, ret);
701 mutex_lock(&new_root->objectid_mutex);
702 new_root->highest_objectid = new_dirid;
703 mutex_unlock(&new_root->objectid_mutex);
706 * insert the directory item
708 ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
710 btrfs_abort_transaction(trans, ret);
714 ret = btrfs_insert_dir_item(trans, root,
715 name, namelen, BTRFS_I(dir), &key,
716 BTRFS_FT_DIR, index);
718 btrfs_abort_transaction(trans, ret);
722 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
723 ret = btrfs_update_inode(trans, root, dir);
725 btrfs_abort_transaction(trans, ret);
729 ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
730 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
732 btrfs_abort_transaction(trans, ret);
736 ret = btrfs_uuid_tree_add(trans, root_item->uuid,
737 BTRFS_UUID_KEY_SUBVOL, objectid);
739 btrfs_abort_transaction(trans, ret);
743 trans->block_rsv = NULL;
744 trans->bytes_reserved = 0;
745 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
748 *async_transid = trans->transid;
749 err = btrfs_commit_transaction_async(trans, 1);
751 err = btrfs_commit_transaction(trans);
753 err = btrfs_commit_transaction(trans);
759 inode = btrfs_lookup_dentry(dir, dentry);
761 return PTR_ERR(inode);
762 d_instantiate(dentry, inode);
771 static int create_snapshot(struct btrfs_root *root, struct inode *dir,
772 struct dentry *dentry,
773 u64 *async_transid, bool readonly,
774 struct btrfs_qgroup_inherit *inherit)
776 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
778 struct btrfs_pending_snapshot *pending_snapshot;
779 struct btrfs_trans_handle *trans;
781 bool snapshot_force_cow = false;
783 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
786 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
787 if (!pending_snapshot)
790 pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
792 pending_snapshot->path = btrfs_alloc_path();
793 if (!pending_snapshot->root_item || !pending_snapshot->path) {
799 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running delalloc)
	 * falling back to COW mode and unexpectedly failing with ENOSPC.
	 */
803 atomic_inc(&root->will_be_snapshotted);
804 smp_mb__after_atomic();
	/* wait for no snapshot writers */
806 wait_event(root->subv_writers->wait,
807 percpu_counter_sum(&root->subv_writers->counter) == 0);
809 ret = btrfs_start_delalloc_snapshot(root);
814 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fall back to COW mode during snapshot
	 * creation.
	 */
818 atomic_inc(&root->snapshot_force_cow);
819 snapshot_force_cow = true;
821 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
823 btrfs_init_block_rsv(&pending_snapshot->block_rsv,
824 BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
833 ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
834 &pending_snapshot->block_rsv, 8,
839 pending_snapshot->dentry = dentry;
840 pending_snapshot->root = root;
841 pending_snapshot->readonly = readonly;
842 pending_snapshot->dir = dir;
843 pending_snapshot->inherit = inherit;
845 trans = btrfs_start_transaction(root, 0);
847 ret = PTR_ERR(trans);
851 spin_lock(&fs_info->trans_lock);
852 list_add(&pending_snapshot->list,
853 &trans->transaction->pending_snapshots);
854 spin_unlock(&fs_info->trans_lock);
856 *async_transid = trans->transid;
857 ret = btrfs_commit_transaction_async(trans, 1);
859 ret = btrfs_commit_transaction(trans);
861 ret = btrfs_commit_transaction(trans);
866 ret = pending_snapshot->error;
870 ret = btrfs_orphan_cleanup(pending_snapshot->snap);
874 inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
876 ret = PTR_ERR(inode);
880 d_instantiate(dentry, inode);
883 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
885 if (snapshot_force_cow)
886 atomic_dec(&root->snapshot_force_cow);
887 if (atomic_dec_and_test(&root->will_be_snapshotted))
888 wake_up_var(&root->will_be_snapshotted);
890 kfree(pending_snapshot->root_item);
891 btrfs_free_path(pending_snapshot->path);
892 kfree(pending_snapshot);
/* Copy of may_delete() in fs/namei.c:
898 * Check whether we can remove a link victim from directory dir, check
899 * whether the type of victim is right.
900 * 1. We can't do it if dir is read-only (done in permission())
901 * 2. We should have write and exec permissions on dir
902 * 3. We can't remove anything from append-only dir
903 * 4. We can't do anything with immutable dir (done in permission())
904 * 5. If the sticky bit on dir is set we should either
905 * a. be owner of dir, or
906 * b. be owner of victim, or
907 * c. have CAP_FOWNER capability
908 * 6. If the victim is append-only or immutable we can't do anything with
909 * links pointing to it.
910 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
911 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
912 * 9. We can't remove a root or mountpoint.
913 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
914 * nfs_async_unlink().
917 static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
921 if (d_really_is_negative(victim))
924 BUG_ON(d_inode(victim->d_parent) != dir);
925 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
927 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
932 if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
933 IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
936 if (!d_is_dir(victim))
940 } else if (d_is_dir(victim))
944 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
/* Copy of may_create() in fs/namei.c */
950 static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
952 if (d_really_is_positive(child))
956 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
960 * Create a new subvolume below @parent. This is largely modeled after
961 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
962 * inside this filesystem so it's quite a bit simpler.
964 static noinline int btrfs_mksubvol(const struct path *parent,
965 const char *name, int namelen,
966 struct btrfs_root *snap_src,
967 u64 *async_transid, bool readonly,
968 struct btrfs_qgroup_inherit *inherit)
970 struct inode *dir = d_inode(parent->dentry);
971 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
972 struct dentry *dentry;
975 error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
979 dentry = lookup_one_len(name, parent->dentry, namelen);
980 error = PTR_ERR(dentry);
984 error = btrfs_may_create(dir, dentry);
989 * even if this name doesn't exist, we may get hash collisions.
990 * check for them now when we can safely fail
992 error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
998 down_read(&fs_info->subvol_sem);
1000 if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
1004 error = create_snapshot(snap_src, dir, dentry,
1005 async_transid, readonly, inherit);
1007 error = create_subvol(dir, dentry, name, namelen,
1008 async_transid, inherit);
1011 fsnotify_mkdir(dir, dentry);
1013 up_read(&fs_info->subvol_sem);
1022 * When we're defragging a range, we don't want to kick it off again
1023 * if it is really just waiting for delalloc to send it down.
1024 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file.
 */
1028 static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
1030 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1031 struct extent_map *em = NULL;
1032 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1035 read_lock(&em_tree->lock);
1036 em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
1037 read_unlock(&em_tree->lock);
1040 end = extent_map_end(em);
1041 free_extent_map(em);
1042 if (end - offset > thresh)
1045 /* if we already have a nice delalloc here, just stop */
1047 end = count_range_bits(io_tree, &offset, offset + thresh,
1048 thresh, EXTENT_DELALLOC, 1);
1055 * helper function to walk through a file and find extents
1056 * newer than a specific transid, and smaller than thresh.
 * This is used by the defragging code to find new and small
 * extents.
 */
1061 static int find_new_extents(struct btrfs_root *root,
1062 struct inode *inode, u64 newer_than,
1063 u64 *off, u32 thresh)
1065 struct btrfs_path *path;
1066 struct btrfs_key min_key;
1067 struct extent_buffer *leaf;
1068 struct btrfs_file_extent_item *extent;
1071 u64 ino = btrfs_ino(BTRFS_I(inode));
1073 path = btrfs_alloc_path();
1077 min_key.objectid = ino;
1078 min_key.type = BTRFS_EXTENT_DATA_KEY;
1079 min_key.offset = *off;
1082 ret = btrfs_search_forward(root, &min_key, path, newer_than);
1086 if (min_key.objectid != ino)
1088 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
1091 leaf = path->nodes[0];
1092 extent = btrfs_item_ptr(leaf, path->slots[0],
1093 struct btrfs_file_extent_item);
1095 type = btrfs_file_extent_type(leaf, extent);
1096 if (type == BTRFS_FILE_EXTENT_REG &&
1097 btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
1098 check_defrag_in_cache(inode, min_key.offset, thresh)) {
1099 *off = min_key.offset;
1100 btrfs_free_path(path);
1105 if (path->slots[0] < btrfs_header_nritems(leaf)) {
1106 btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
1110 if (min_key.offset == (u64)-1)
1114 btrfs_release_path(path);
1117 btrfs_free_path(path);
1121 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
1123 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1124 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1125 struct extent_map *em;
1126 u64 len = PAGE_SIZE;
1129 * hopefully we have this extent in the tree already, try without
1130 * the full extent lock
1132 read_lock(&em_tree->lock);
1133 em = lookup_extent_mapping(em_tree, start, len);
1134 read_unlock(&em_tree->lock);
1137 struct extent_state *cached = NULL;
1138 u64 end = start + len - 1;
1140 /* get the big lock and read metadata off disk */
1141 lock_extent_bits(io_tree, start, end, &cached);
1142 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
1143 unlock_extent_cached(io_tree, start, end, &cached);
1152 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
1154 struct extent_map *next;
1157 /* this is the last extent */
1158 if (em->start + em->len >= i_size_read(inode))
1161 next = defrag_lookup_extent(inode, em->start + em->len);
1162 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
1164 else if ((em->block_start + em->block_len == next->block_start) &&
1165 (em->block_len > SZ_128K && next->block_len > SZ_128K))
1168 free_extent_map(next);
1172 static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1173 u64 *last_len, u64 *skip, u64 *defrag_end,
1176 struct extent_map *em;
1178 bool next_mergeable = true;
1179 bool prev_mergeable = true;
	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
1185 if (start < *defrag_end)
1190 em = defrag_lookup_extent(inode, start);
1194 /* this will cover holes, and inline extents */
1195 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1201 prev_mergeable = false;
1203 next_mergeable = defrag_check_next_extent(inode, em);
1205 * we hit a real extent, if it is big or the next extent is not a
1206 * real extent, don't bother defragging it
1208 if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1209 (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1213 * last_len ends up being a counter of how many bytes we've defragged.
1214 * every time we choose not to defrag an extent, we reset *last_len
1215 * so that the next tiny extent will force a defrag.
1217 * The end result of this is that tiny extents before a single big
1218 * extent will force at least part of that big extent to be defragged.
1221 *defrag_end = extent_map_end(em);
1224 *skip = extent_map_end(em);
1228 free_extent_map(em);
1233 * it doesn't do much good to defrag one or two pages
1234 * at a time. This pulls in a nice chunk of pages
1235 * to COW and defrag.
1237 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag.
1241 * It's a good idea to start RA on this range
1242 * before calling this.
1244 static int cluster_pages_for_defrag(struct inode *inode,
1245 struct page **pages,
1246 unsigned long start_index,
1247 unsigned long num_pages)
1249 unsigned long file_end;
1250 u64 isize = i_size_read(inode);
1254 u64 start = (u64)start_index << PAGE_SHIFT;
1258 struct btrfs_ordered_extent *ordered;
1259 struct extent_state *cached_state = NULL;
1260 struct extent_io_tree *tree;
1261 struct extent_changeset *data_reserved = NULL;
1262 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1264 file_end = (isize - 1) >> PAGE_SHIFT;
1265 if (!isize || start_index > file_end)
1268 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
1270 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
1271 start, page_cnt << PAGE_SHIFT);
1275 tree = &BTRFS_I(inode)->io_tree;
1277 /* step one, lock all the pages */
1278 for (i = 0; i < page_cnt; i++) {
1281 page = find_or_create_page(inode->i_mapping,
1282 start_index + i, mask);
1286 page_start = page_offset(page);
1287 page_end = page_start + PAGE_SIZE - 1;
1289 lock_extent_bits(tree, page_start, page_end,
1291 ordered = btrfs_lookup_ordered_extent(inode,
1293 unlock_extent_cached(tree, page_start, page_end,
1299 btrfs_start_ordered_extent(inode, ordered, 1);
1300 btrfs_put_ordered_extent(ordered);
			/*
			 * we unlocked the page above, so we need to check if
			 * it was released or not.
			 */
1306 if (page->mapping != inode->i_mapping) {
1313 if (!PageUptodate(page)) {
1314 btrfs_readpage(NULL, page);
1316 if (!PageUptodate(page)) {
1324 if (page->mapping != inode->i_mapping) {
1336 if (!(inode->i_sb->s_flags & SB_ACTIVE))
	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, let's wait on them
	 */
1343 for (i = 0; i < i_done; i++)
1344 wait_on_page_writeback(pages[i]);
1346 page_start = page_offset(pages[0]);
1347 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
1349 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1350 page_start, page_end - 1, &cached_state);
1351 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
1352 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1353 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
1356 if (i_done != page_cnt) {
1357 spin_lock(&BTRFS_I(inode)->lock);
1358 btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1359 spin_unlock(&BTRFS_I(inode)->lock);
1360 btrfs_delalloc_release_space(inode, data_reserved,
1361 start, (page_cnt - i_done) << PAGE_SHIFT, true);
1365 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1368 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1369 page_start, page_end - 1, &cached_state);
1371 for (i = 0; i < i_done; i++) {
1372 clear_page_dirty_for_io(pages[i]);
1373 ClearPageChecked(pages[i]);
1374 set_page_extent_mapped(pages[i]);
1375 set_page_dirty(pages[i]);
1376 unlock_page(pages[i]);
1379 btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
1380 extent_changeset_free(data_reserved);
1383 for (i = 0; i < i_done; i++) {
1384 unlock_page(pages[i]);
1387 btrfs_delalloc_release_space(inode, data_reserved,
1388 start, page_cnt << PAGE_SHIFT, true);
1389 btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
1390 extent_changeset_free(data_reserved);
1395 int btrfs_defrag_file(struct inode *inode, struct file *file,
1396 struct btrfs_ioctl_defrag_range_args *range,
1397 u64 newer_than, unsigned long max_to_defrag)
1399 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1400 struct btrfs_root *root = BTRFS_I(inode)->root;
1401 struct file_ra_state *ra = NULL;
1402 unsigned long last_index;
1403 u64 isize = i_size_read(inode);
1407 u64 newer_off = range->start;
1409 unsigned long ra_index = 0;
1411 int defrag_count = 0;
1412 int compress_type = BTRFS_COMPRESS_ZLIB;
1413 u32 extent_thresh = range->extent_thresh;
1414 unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1415 unsigned long cluster = max_cluster;
1416 u64 new_align = ~((u64)SZ_128K - 1);
1417 struct page **pages = NULL;
1418 bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1423 if (range->start >= isize)
1427 if (range->compress_type > BTRFS_COMPRESS_TYPES)
1429 if (range->compress_type)
1430 compress_type = range->compress_type;
1433 if (extent_thresh == 0)
1434 extent_thresh = SZ_256K;
1437 * If we were not given a file, allocate a readahead context. As
1438 * readahead is just an optimization, defrag will work without it so
1439 * we don't error out.
1442 ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1444 file_ra_state_init(ra, inode->i_mapping);
1449 pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
1455 /* find the last page to defrag */
1456 if (range->start + range->len > range->start) {
1457 last_index = min_t(u64, isize - 1,
1458 range->start + range->len - 1) >> PAGE_SHIFT;
1460 last_index = (isize - 1) >> PAGE_SHIFT;
1464 ret = find_new_extents(root, inode, newer_than,
1465 &newer_off, SZ_64K);
1467 range->start = newer_off;
1469 * we always align our defrag to help keep
1470 * the extents in the file evenly spaced
1472 i = (newer_off & new_align) >> PAGE_SHIFT;
1476 i = range->start >> PAGE_SHIFT;
1479 max_to_defrag = last_index - i + 1;
	/*
	 * make writeback start from i, so the defrag range can be
	 * written sequentially.
	 */
1485 if (i < inode->i_mapping->writeback_index)
1486 inode->i_mapping->writeback_index = i;
1488 while (i <= last_index && defrag_count < max_to_defrag &&
1489 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts us
		 */
1494 if (!(inode->i_sb->s_flags & SB_ACTIVE))
1497 if (btrfs_defrag_cancelled(fs_info)) {
1498 btrfs_debug(fs_info, "defrag_file cancelled");
1503 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
1504 extent_thresh, &last_len, &skip,
1505 &defrag_end, do_compress)){
			/*
			 * the should_defrag function tells us how much to skip;
			 * bump our counter by the suggested amount
			 */
1511 next = DIV_ROUND_UP(skip, PAGE_SIZE);
1512 i = max(i + 1, next);
1517 cluster = (PAGE_ALIGN(defrag_end) >>
1519 cluster = min(cluster, max_cluster);
1521 cluster = max_cluster;
1524 if (i + cluster > ra_index) {
1525 ra_index = max(i, ra_index);
1527 page_cache_sync_readahead(inode->i_mapping, ra,
1528 file, ra_index, cluster);
1529 ra_index += cluster;
1534 BTRFS_I(inode)->defrag_compress = compress_type;
1535 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1537 inode_unlock(inode);
1541 defrag_count += ret;
1542 balance_dirty_pages_ratelimited(inode->i_mapping);
1543 inode_unlock(inode);
1546 if (newer_off == (u64)-1)
1552 newer_off = max(newer_off + 1,
1553 (u64)i << PAGE_SHIFT);
1555 ret = find_new_extents(root, inode, newer_than,
1556 &newer_off, SZ_64K);
1558 range->start = newer_off;
1559 i = (newer_off & new_align) >> PAGE_SHIFT;
1566 last_len += ret << PAGE_SHIFT;
1574 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
1575 filemap_flush(inode->i_mapping);
1576 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1577 &BTRFS_I(inode)->runtime_flags))
1578 filemap_flush(inode->i_mapping);
1581 if (range->compress_type == BTRFS_COMPRESS_LZO) {
1582 btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1583 } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
1584 btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1592 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1593 inode_unlock(inode);
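/*
 * Illustrative userspace sketch (not part of this file): asking for a
 * compressing defrag of a whole file through BTRFS_IOC_DEFRAG_RANGE.  A zero
 * extent_thresh falls back to the 256K default chosen above, and
 * BTRFS_DEFRAG_RANGE_START_IO makes the kernel flush the dirtied range.
 *
 *	struct btrfs_ioctl_defrag_range_args args = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = BTRFS_DEFRAG_RANGE_COMPRESS | BTRFS_DEFRAG_RANGE_START_IO,
 *		.extent_thresh = 0,
 *		.compress_type = BTRFS_COMPRESS_ZSTD,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args);
 */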
1601 static noinline int btrfs_ioctl_resize(struct file *file,
1604 struct inode *inode = file_inode(file);
1605 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1609 struct btrfs_root *root = BTRFS_I(inode)->root;
1610 struct btrfs_ioctl_vol_args *vol_args;
1611 struct btrfs_trans_handle *trans;
1612 struct btrfs_device *device = NULL;
1615 char *devstr = NULL;
1619 if (!capable(CAP_SYS_ADMIN))
1622 ret = mnt_want_write_file(file);
1626 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
1627 mnt_drop_write_file(file);
1628 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1631 vol_args = memdup_user(arg, sizeof(*vol_args));
1632 if (IS_ERR(vol_args)) {
1633 ret = PTR_ERR(vol_args);
1637 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1639 sizestr = vol_args->name;
1640 devstr = strchr(sizestr, ':');
1642 sizestr = devstr + 1;
1644 devstr = vol_args->name;
1645 ret = kstrtoull(devstr, 10, &devid);
1652 btrfs_info(fs_info, "resizing devid %llu", devid);
1655 device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
1657 btrfs_info(fs_info, "resizer unable to find device %llu",
1663 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1665 "resizer unable to apply on readonly device %llu",
1671 if (!strcmp(sizestr, "max"))
1672 new_size = device->bdev->bd_inode->i_size;
1674 if (sizestr[0] == '-') {
1677 } else if (sizestr[0] == '+') {
1681 new_size = memparse(sizestr, &retptr);
1682 if (*retptr != '\0' || new_size == 0) {
1688 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1693 old_size = btrfs_device_get_total_bytes(device);
1696 if (new_size > old_size) {
1700 new_size = old_size - new_size;
1701 } else if (mod > 0) {
1702 if (new_size > ULLONG_MAX - old_size) {
1706 new_size = old_size + new_size;
1709 if (new_size < SZ_256M) {
1713 if (new_size > device->bdev->bd_inode->i_size) {
1718 new_size = round_down(new_size, fs_info->sectorsize);
1720 btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
1721 rcu_str_deref(device->name), new_size);
1723 if (new_size > old_size) {
1724 trans = btrfs_start_transaction(root, 0);
1725 if (IS_ERR(trans)) {
1726 ret = PTR_ERR(trans);
1729 ret = btrfs_grow_device(trans, device, new_size);
1730 btrfs_commit_transaction(trans);
1731 } else if (new_size < old_size) {
1732 ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing needs to be done */
1738 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
1739 mnt_drop_write_file(file);
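/*
 * Illustrative userspace sketch (not part of this file): the resize argument
 * is the "[<devid>:]<size>" string parsed above, where <size> is "max", an
 * absolute size, or a +/- delta accepted by memparse().  For example,
 * "1:+2g" grows device 1 by 2GiB, and "max" grows the (single) device to the
 * size of the underlying block device.
 *
 *	struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *
 *	strncpy(args.name, "1:+2g", BTRFS_PATH_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_RESIZE, &args);
 */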
1743 static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1744 const char *name, unsigned long fd, int subvol,
1745 u64 *transid, bool readonly,
1746 struct btrfs_qgroup_inherit *inherit)
1751 if (!S_ISDIR(file_inode(file)->i_mode))
1754 ret = mnt_want_write_file(file);
1758 namelen = strlen(name);
1759 if (strchr(name, '/')) {
1761 goto out_drop_write;
1764 if (name[0] == '.' &&
1765 (namelen == 1 || (name[1] == '.' && namelen == 2))) {
1767 goto out_drop_write;
1771 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1772 NULL, transid, readonly, inherit);
1774 struct fd src = fdget(fd);
1775 struct inode *src_inode;
1778 goto out_drop_write;
1781 src_inode = file_inode(src.file);
1782 if (src_inode->i_sb != file_inode(file)->i_sb) {
1783 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1784 "Snapshot src from another FS");
1786 } else if (!inode_owner_or_capable(src_inode)) {
1788 * Subvolume creation is not restricted, but snapshots
			 * are limited to the user's own subvolumes only
			 */
1793 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1794 BTRFS_I(src_inode)->root,
1795 transid, readonly, inherit);
1800 mnt_drop_write_file(file);
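/*
 * Illustrative userspace sketch (not part of this file): creating a read-only
 * snapshot.  The ioctl is issued on the destination directory, while args.fd
 * refers to the source subvolume, matching the fdget() handling above.  Paths
 * are hypothetical.
 *
 *	int dstdir = open("/mnt/btrfs", O_RDONLY | O_DIRECTORY);
 *	int src = open("/mnt/btrfs/subvol", O_RDONLY | O_DIRECTORY);
 *	struct btrfs_ioctl_vol_args_v2 args = {
 *		.fd = src,
 *		.flags = BTRFS_SUBVOL_RDONLY,
 *	};
 *
 *	strncpy(args.name, "subvol-snap", BTRFS_SUBVOL_NAME_MAX);
 *	ioctl(dstdir, BTRFS_IOC_SNAP_CREATE_V2, &args);
 */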
1805 static noinline int btrfs_ioctl_snap_create(struct file *file,
1806 void __user *arg, int subvol)
1808 struct btrfs_ioctl_vol_args *vol_args;
1811 if (!S_ISDIR(file_inode(file)->i_mode))
1814 vol_args = memdup_user(arg, sizeof(*vol_args));
1815 if (IS_ERR(vol_args))
1816 return PTR_ERR(vol_args);
1817 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1819 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1820 vol_args->fd, subvol,
1827 static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1828 void __user *arg, int subvol)
1830 struct btrfs_ioctl_vol_args_v2 *vol_args;
1834 bool readonly = false;
1835 struct btrfs_qgroup_inherit *inherit = NULL;
1837 if (!S_ISDIR(file_inode(file)->i_mode))
1840 vol_args = memdup_user(arg, sizeof(*vol_args));
1841 if (IS_ERR(vol_args))
1842 return PTR_ERR(vol_args);
1843 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1845 if (vol_args->flags &
1846 ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
1847 BTRFS_SUBVOL_QGROUP_INHERIT)) {
1852 if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
1854 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1856 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1859 if (vol_args->size < sizeof(*inherit) ||
1860 vol_args->size > PAGE_SIZE) {
1864 inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
1865 if (IS_ERR(inherit)) {
1866 ret = PTR_ERR(inherit);
1870 if (inherit->num_qgroups > PAGE_SIZE ||
1871 inherit->num_ref_copies > PAGE_SIZE ||
1872 inherit->num_excl_copies > PAGE_SIZE) {
1877 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1878 2 * inherit->num_excl_copies;
1879 if (vol_args->size != struct_size(inherit, qgroups, nums)) {
1885 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1886 vol_args->fd, subvol, ptr,
1891 if (ptr && copy_to_user(arg +
1892 offsetof(struct btrfs_ioctl_vol_args_v2,
1904 static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1907 struct inode *inode = file_inode(file);
1908 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1909 struct btrfs_root *root = BTRFS_I(inode)->root;
1913 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
1916 down_read(&fs_info->subvol_sem);
1917 if (btrfs_root_readonly(root))
1918 flags |= BTRFS_SUBVOL_RDONLY;
1919 up_read(&fs_info->subvol_sem);
1921 if (copy_to_user(arg, &flags, sizeof(flags)))
1927 static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1930 struct inode *inode = file_inode(file);
1931 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1932 struct btrfs_root *root = BTRFS_I(inode)->root;
1933 struct btrfs_trans_handle *trans;
1938 if (!inode_owner_or_capable(inode))
1941 ret = mnt_want_write_file(file);
1945 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
1947 goto out_drop_write;
1950 if (copy_from_user(&flags, arg, sizeof(flags))) {
1952 goto out_drop_write;
1955 if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
1957 goto out_drop_write;
1960 if (flags & ~BTRFS_SUBVOL_RDONLY) {
1962 goto out_drop_write;
1965 down_write(&fs_info->subvol_sem);
1968 if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
1971 root_flags = btrfs_root_flags(&root->root_item);
1972 if (flags & BTRFS_SUBVOL_RDONLY) {
1973 btrfs_set_root_flags(&root->root_item,
1974 root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
1980 spin_lock(&root->root_item_lock);
1981 if (root->send_in_progress == 0) {
1982 btrfs_set_root_flags(&root->root_item,
1983 root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
1984 spin_unlock(&root->root_item_lock);
1986 spin_unlock(&root->root_item_lock);
1988 "Attempt to set subvolume %llu read-write during send",
1989 root->root_key.objectid);
1995 trans = btrfs_start_transaction(root, 1);
1996 if (IS_ERR(trans)) {
1997 ret = PTR_ERR(trans);
2001 ret = btrfs_update_root(trans, fs_info->tree_root,
2002 &root->root_key, &root->root_item);
2004 btrfs_end_transaction(trans);
2008 ret = btrfs_commit_transaction(trans);
2012 btrfs_set_root_flags(&root->root_item, root_flags);
2014 up_write(&fs_info->subvol_sem);
2016 mnt_drop_write_file(file);
2021 static noinline int key_in_sk(struct btrfs_key *key,
2022 struct btrfs_ioctl_search_key *sk)
2024 struct btrfs_key test;
2027 test.objectid = sk->min_objectid;
2028 test.type = sk->min_type;
2029 test.offset = sk->min_offset;
2031 ret = btrfs_comp_cpu_keys(key, &test);
2035 test.objectid = sk->max_objectid;
2036 test.type = sk->max_type;
2037 test.offset = sk->max_offset;
2039 ret = btrfs_comp_cpu_keys(key, &test);
2045 static noinline int copy_to_sk(struct btrfs_path *path,
2046 struct btrfs_key *key,
2047 struct btrfs_ioctl_search_key *sk,
2050 unsigned long *sk_offset,
2054 struct extent_buffer *leaf;
2055 struct btrfs_ioctl_search_header sh;
2056 struct btrfs_key test;
2057 unsigned long item_off;
2058 unsigned long item_len;
2064 leaf = path->nodes[0];
2065 slot = path->slots[0];
2066 nritems = btrfs_header_nritems(leaf);
2068 if (btrfs_header_generation(leaf) > sk->max_transid) {
2072 found_transid = btrfs_header_generation(leaf);
2074 for (i = slot; i < nritems; i++) {
2075 item_off = btrfs_item_ptr_offset(leaf, i);
2076 item_len = btrfs_item_size_nr(leaf, i);
2078 btrfs_item_key_to_cpu(leaf, key, i);
2079 if (!key_in_sk(key, sk))
2082 if (sizeof(sh) + item_len > *buf_size) {
			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */
2093 *buf_size = sizeof(sh) + item_len;
2098 if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
2103 sh.objectid = key->objectid;
2104 sh.offset = key->offset;
2105 sh.type = key->type;
2107 sh.transid = found_transid;
2110 * Copy search result header. If we fault then loop again so we
2111 * can fault in the pages and -EFAULT there if there's a
2112 * problem. Otherwise we'll fault and then copy the buffer in
2113 * properly this next time through
2115 if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
2120 *sk_offset += sizeof(sh);
2123 char __user *up = ubuf + *sk_offset;
		/*
		 * Copy the item, same behavior as above, but reset
		 * *sk_offset so we copy the full thing again.
		 */
2128 if (read_extent_buffer_to_user_nofault(leaf, up,
2129 item_off, item_len)) {
2131 *sk_offset -= sizeof(sh);
2135 *sk_offset += item_len;
2139 if (ret) /* -EOVERFLOW from above */
2142 if (*num_found >= sk->nr_items) {
2149 test.objectid = sk->max_objectid;
2150 test.type = sk->max_type;
2151 test.offset = sk->max_offset;
2152 if (btrfs_comp_cpu_keys(key, &test) >= 0)
2154 else if (key->offset < (u64)-1)
2156 else if (key->type < (u8)-1) {
2159 } else if (key->objectid < (u64)-1) {
2167 * 0: all items from this leaf copied, continue with next
2168 * 1: * more items can be copied, but unused buffer is too small
2169 * * all items were found
 *     Either way, it stops the loop which iterates to the next leaf.
 *  -EOVERFLOW: item was too large for the buffer
2173 * -EFAULT: could not copy extent buffer back to userspace
2178 static noinline int search_ioctl(struct inode *inode,
2179 struct btrfs_ioctl_search_key *sk,
2183 struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
2184 struct btrfs_root *root;
2185 struct btrfs_key key;
2186 struct btrfs_path *path;
2189 unsigned long sk_offset = 0;
2191 if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2192 *buf_size = sizeof(struct btrfs_ioctl_search_header);
2196 path = btrfs_alloc_path();
2200 if (sk->tree_id == 0) {
2201 /* search the root of the inode that was passed */
2202 root = BTRFS_I(inode)->root;
2204 key.objectid = sk->tree_id;
2205 key.type = BTRFS_ROOT_ITEM_KEY;
2206 key.offset = (u64)-1;
2207 root = btrfs_read_fs_root_no_name(info, &key);
2209 btrfs_free_path(path);
2210 return PTR_ERR(root);
2214 key.objectid = sk->min_objectid;
2215 key.type = sk->min_type;
2216 key.offset = sk->min_offset;
2219 ret = fault_in_pages_writeable(ubuf + sk_offset,
2220 *buf_size - sk_offset);
2224 ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2230 ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
2231 &sk_offset, &num_found);
2232 btrfs_release_path(path);
2240 sk->nr_items = num_found;
2241 btrfs_free_path(path);
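/*
 * Illustrative userspace sketch (not part of this file): dumping all
 * ROOT_ITEM keys from the root tree with BTRFS_IOC_TREE_SEARCH (which needs
 * CAP_SYS_ADMIN, see below).  The key fields mirror struct
 * btrfs_ioctl_search_key used above; nr_items is updated on return to the
 * number of items actually copied into args.buf.
 *
 *	struct btrfs_ioctl_search_args args = { 0 };
 *
 *	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args.key.min_type = BTRFS_ROOT_ITEM_KEY;
 *	args.key.max_type = BTRFS_ROOT_ITEM_KEY;
 *	args.key.max_objectid = (__u64)-1;
 *	args.key.max_offset = (__u64)-1;
 *	args.key.max_transid = (__u64)-1;
 *	args.key.nr_items = 4096;
 *
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
 */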
2245 static noinline int btrfs_ioctl_tree_search(struct file *file,
2248 struct btrfs_ioctl_search_args __user *uargs;
2249 struct btrfs_ioctl_search_key sk;
2250 struct inode *inode;
2254 if (!capable(CAP_SYS_ADMIN))
2257 uargs = (struct btrfs_ioctl_search_args __user *)argp;
2259 if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2262 buf_size = sizeof(uargs->buf);
2264 inode = file_inode(file);
2265 ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
2271 if (ret == -EOVERFLOW)
2274 if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
2279 static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
2282 struct btrfs_ioctl_search_args_v2 __user *uarg;
2283 struct btrfs_ioctl_search_args_v2 args;
2284 struct inode *inode;
2287 const size_t buf_limit = SZ_16M;
2289 if (!capable(CAP_SYS_ADMIN))
2292 /* copy search header and buffer size */
2293 uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
2294 if (copy_from_user(&args, uarg, sizeof(args)))
2297 buf_size = args.buf_size;
2299 /* limit result size to 16MB */
2300 if (buf_size > buf_limit)
2301 buf_size = buf_limit;
2303 inode = file_inode(file);
2304 ret = search_ioctl(inode, &args.key, &buf_size,
2305 (char __user *)(&uarg->buf[0]));
2306 if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2308 else if (ret == -EOVERFLOW &&
2309 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
/*
 * Search INODE_REFs to identify the path name of the directory 'dirid'
 * in the tree 'tree_id', and store that path name in 'name'.
 */
2319 static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2320 u64 tree_id, u64 dirid, char *name)
2322 struct btrfs_root *root;
2323 struct btrfs_key key;
2329 struct btrfs_inode_ref *iref;
2330 struct extent_buffer *l;
2331 struct btrfs_path *path;
2333 if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
2338 path = btrfs_alloc_path();
2342 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2344 key.objectid = tree_id;
2345 key.type = BTRFS_ROOT_ITEM_KEY;
2346 key.offset = (u64)-1;
2347 root = btrfs_read_fs_root_no_name(info, &key);
2349 ret = PTR_ERR(root);
2353 key.objectid = dirid;
2354 key.type = BTRFS_INODE_REF_KEY;
2355 key.offset = (u64)-1;
2358 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2362 ret = btrfs_previous_item(root, path, dirid,
2363 BTRFS_INODE_REF_KEY);
2373 slot = path->slots[0];
2374 btrfs_item_key_to_cpu(l, &key, slot);
2376 iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
2377 len = btrfs_inode_ref_name_len(l, iref);
2379 total_len += len + 1;
2381 ret = -ENAMETOOLONG;
2386 read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2388 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
2391 btrfs_release_path(path);
2392 key.objectid = key.offset;
2393 key.offset = (u64)-1;
2394 dirid = key.objectid;
2396 memmove(name, ptr, total_len);
2397 name[total_len] = '\0';
2400 btrfs_free_path(path);
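/*
 * Illustrative userspace sketch (not part of this file): resolving the path
 * of an inode inside a subvolume with BTRFS_IOC_INO_LOOKUP, which ends up in
 * btrfs_search_path_in_tree() above.  treeid 0 means "the subvolume of the
 * fd"; asking for anything other than BTRFS_FIRST_FREE_OBJECTID requires
 * CAP_SYS_ADMIN, as the handler below enforces.  The inode number is
 * hypothetical.
 *
 *	struct btrfs_ioctl_ino_lookup_args args = {
 *		.treeid = 0,
 *		.objectid = 257,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args);
 *	printf("%s\n", args.name);
 */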
2404 static int btrfs_search_path_in_tree_user(struct inode *inode,
2405 struct btrfs_ioctl_ino_lookup_user_args *args)
2407 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2408 struct super_block *sb = inode->i_sb;
2409 struct btrfs_key upper_limit = BTRFS_I(inode)->location;
2410 u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
2411 u64 dirid = args->dirid;
2412 unsigned long item_off;
2413 unsigned long item_len;
2414 struct btrfs_inode_ref *iref;
2415 struct btrfs_root_ref *rref;
2416 struct btrfs_root *root;
2417 struct btrfs_path *path;
2418 struct btrfs_key key, key2;
2419 struct extent_buffer *leaf;
2420 struct inode *temp_inode;
2427 path = btrfs_alloc_path();
2432 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path from the bottom up.
	 */
2435 if (dirid != upper_limit.objectid) {
2436 ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
2438 key.objectid = treeid;
2439 key.type = BTRFS_ROOT_ITEM_KEY;
2440 key.offset = (u64)-1;
2441 root = btrfs_read_fs_root_no_name(fs_info, &key);
2443 ret = PTR_ERR(root);
2447 key.objectid = dirid;
2448 key.type = BTRFS_INODE_REF_KEY;
2449 key.offset = (u64)-1;
2451 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2454 } else if (ret > 0) {
2455 ret = btrfs_previous_item(root, path, dirid,
2456 BTRFS_INODE_REF_KEY);
2459 } else if (ret > 0) {
2465 leaf = path->nodes[0];
2466 slot = path->slots[0];
2467 btrfs_item_key_to_cpu(leaf, &key, slot);
2469 iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
2470 len = btrfs_inode_ref_name_len(leaf, iref);
2472 total_len += len + 1;
2473 if (ptr < args->path) {
2474 ret = -ENAMETOOLONG;
2479 read_extent_buffer(leaf, ptr,
2480 (unsigned long)(iref + 1), len);
2482 /* Check the read+exec permission of this directory */
2483 ret = btrfs_previous_item(root, path, dirid,
2484 BTRFS_INODE_ITEM_KEY);
2487 } else if (ret > 0) {
2492 leaf = path->nodes[0];
2493 slot = path->slots[0];
2494 btrfs_item_key_to_cpu(leaf, &key2, slot);
2495 if (key2.objectid != dirid) {
2500 temp_inode = btrfs_iget(sb, &key2, root, NULL);
2501 if (IS_ERR(temp_inode)) {
2502 ret = PTR_ERR(temp_inode);
2505 ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
2512 if (key.offset == upper_limit.objectid)
2514 if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
2519 btrfs_release_path(path);
2520 key.objectid = key.offset;
2521 key.offset = (u64)-1;
2522 dirid = key.objectid;
2525 memmove(args->path, ptr, total_len);
2526 args->path[total_len] = '\0';
2527 btrfs_release_path(path);
2530 /* Get the bottom subvolume's name from ROOT_REF */
2531 root = fs_info->tree_root;
2532 key.objectid = treeid;
2533 key.type = BTRFS_ROOT_REF_KEY;
2534 key.offset = args->treeid;
2535 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2538 } else if (ret > 0) {
2543 leaf = path->nodes[0];
2544 slot = path->slots[0];
2545 btrfs_item_key_to_cpu(leaf, &key, slot);
2547 item_off = btrfs_item_ptr_offset(leaf, slot);
2548 item_len = btrfs_item_size_nr(leaf, slot);
2549 /* Check if dirid in ROOT_REF corresponds to passed dirid */
2550 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2551 if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
2556 /* Copy subvolume's name */
2557 item_off += sizeof(struct btrfs_root_ref);
2558 item_len -= sizeof(struct btrfs_root_ref);
2559 read_extent_buffer(leaf, args->name, item_off, item_len);
2560 args->name[item_len] = 0;
2563 btrfs_free_path(path);
2567 static noinline int btrfs_ioctl_ino_lookup(struct file *file,
2570 struct btrfs_ioctl_ino_lookup_args *args;
2571 struct inode *inode;
2574 args = memdup_user(argp, sizeof(*args));
2576 return PTR_ERR(args);
2578 inode = file_inode(file);
2581 * Unprivileged query to obtain the containing subvolume root id. The
2582 * path is reset so it's consistent with btrfs_search_path_in_tree.
2584 if (args->treeid == 0)
2585 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
2587 if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2592 if (!capable(CAP_SYS_ADMIN)) {
2597 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
2598 args->treeid, args->objectid,
2602 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2610 * Version of ino_lookup ioctl (unprivileged)
2612 * The main differences from ino_lookup ioctl are:
2614 * 1. Read + Exec permission will be checked using inode_permission() during
2615 * path construction. -EACCES will be returned in case of failure.
2616 * 2. Path construction will be stopped at the inode number which corresponds
2617 * to the fd with which this ioctl is called. If constructed path does not
2618 * exist under fd's inode, -EACCES will be returned.
2619 * 3. The name of bottom subvolume is also searched and filled.
2621 static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
2623 struct btrfs_ioctl_ino_lookup_user_args *args;
2624 struct inode *inode;
2627 args = memdup_user(argp, sizeof(*args));
2629 return PTR_ERR(args);
2631 inode = file_inode(file);
2633 if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
2634 BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under the fd with which this is
		 * called.
		 */
2643 ret = btrfs_search_path_in_tree_user(inode, args);
2645 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2652 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
2653 static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
2655 struct btrfs_ioctl_get_subvol_info_args *subvol_info;
2656 struct btrfs_fs_info *fs_info;
2657 struct btrfs_root *root;
2658 struct btrfs_path *path;
2659 struct btrfs_key key;
2660 struct btrfs_root_item *root_item;
2661 struct btrfs_root_ref *rref;
2662 struct extent_buffer *leaf;
2663 unsigned long item_off;
2664 unsigned long item_len;
2665 struct inode *inode;
2669 path = btrfs_alloc_path();
2673 subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
2675 btrfs_free_path(path);
2679 inode = file_inode(file);
2680 fs_info = BTRFS_I(inode)->root->fs_info;
2682 /* Get root_item of inode's subvolume */
2683 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
2684 key.type = BTRFS_ROOT_ITEM_KEY;
2685 key.offset = (u64)-1;
2686 root = btrfs_read_fs_root_no_name(fs_info, &key);
2688 ret = PTR_ERR(root);
2691 root_item = &root->root_item;
2693 subvol_info->treeid = key.objectid;
2695 subvol_info->generation = btrfs_root_generation(root_item);
2696 subvol_info->flags = btrfs_root_flags(root_item);
2698 memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
2699 memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
2701 memcpy(subvol_info->received_uuid, root_item->received_uuid,
2704 subvol_info->ctransid = btrfs_root_ctransid(root_item);
2705 subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
2706 subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
2708 subvol_info->otransid = btrfs_root_otransid(root_item);
2709 subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
2710 subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
2712 subvol_info->stransid = btrfs_root_stransid(root_item);
2713 subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
2714 subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
2716 subvol_info->rtransid = btrfs_root_rtransid(root_item);
2717 subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
2718 subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
2720 if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
2721 /* Search root tree for ROOT_BACKREF of this subvolume */
2722 root = fs_info->tree_root;
2724 key.type = BTRFS_ROOT_BACKREF_KEY;
2726 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2729 } else if (path->slots[0] >=
2730 btrfs_header_nritems(path->nodes[0])) {
2731 ret = btrfs_next_leaf(root, path);
2734 } else if (ret > 0) {
2740 leaf = path->nodes[0];
2741 slot = path->slots[0];
2742 btrfs_item_key_to_cpu(leaf, &key, slot);
2743 if (key.objectid == subvol_info->treeid &&
2744 key.type == BTRFS_ROOT_BACKREF_KEY) {
2745 subvol_info->parent_id = key.offset;
2747 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2748 subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
2750 item_off = btrfs_item_ptr_offset(leaf, slot)
2751 + sizeof(struct btrfs_root_ref);
2752 item_len = btrfs_item_size_nr(leaf, slot)
2753 - sizeof(struct btrfs_root_ref);
2754 read_extent_buffer(leaf, subvol_info->name,
2755 item_off, item_len);
2762 btrfs_free_path(path);
2764 if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
2768 btrfs_free_path(path);
2769 kzfree(subvol_info);
2774 * Return ROOT_REF information of the subvolume containing this inode
2775 * except the subvolume name.
2777 static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
2779 struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
2780 struct btrfs_root_ref *rref;
2781 struct btrfs_root *root;
2782 struct btrfs_path *path;
2783 struct btrfs_key key;
2784 struct extent_buffer *leaf;
2785 struct inode *inode;
2791 path = btrfs_alloc_path();
2795 rootrefs = memdup_user(argp, sizeof(*rootrefs));
2796 if (IS_ERR(rootrefs)) {
2797 btrfs_free_path(path);
2798 return PTR_ERR(rootrefs);
2801 inode = file_inode(file);
2802 root = BTRFS_I(inode)->root->fs_info->tree_root;
2803 objectid = BTRFS_I(inode)->root->root_key.objectid;
2805 key.objectid = objectid;
2806 key.type = BTRFS_ROOT_REF_KEY;
2807 key.offset = rootrefs->min_treeid;
2810 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2813 } else if (path->slots[0] >=
2814 btrfs_header_nritems(path->nodes[0])) {
2815 ret = btrfs_next_leaf(root, path);
2818 } else if (ret > 0) {
2824 leaf = path->nodes[0];
2825 slot = path->slots[0];
2827 btrfs_item_key_to_cpu(leaf, &key, slot);
2828 if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
2833 if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
2838 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2839 rootrefs->rootref[found].treeid = key.offset;
2840 rootrefs->rootref[found].dirid =
2841 btrfs_root_ref_dirid(leaf, rref);
2844 ret = btrfs_next_item(root, path);
2847 } else if (ret > 0) {
2854 btrfs_free_path(path);
2856 if (!ret || ret == -EOVERFLOW) {
2857 rootrefs->num_items = found;
2858 /* update min_treeid for next search */
2860 rootrefs->min_treeid =
2861 rootrefs->rootref[found - 1].treeid + 1;
2862 if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
2871 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2874 struct dentry *parent = file->f_path.dentry;
2875 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2876 struct dentry *dentry;
2877 struct inode *dir = d_inode(parent);
2878 struct inode *inode;
2879 struct btrfs_root *root = BTRFS_I(dir)->root;
2880 struct btrfs_root *dest = NULL;
2881 struct btrfs_ioctl_vol_args *vol_args;
2885 if (!S_ISDIR(dir->i_mode))
2888 vol_args = memdup_user(arg, sizeof(*vol_args));
2889 if (IS_ERR(vol_args))
2890 return PTR_ERR(vol_args);
2892 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2893 namelen = strlen(vol_args->name);
2894 if (strchr(vol_args->name, '/') ||
2895 strncmp(vol_args->name, "..", namelen) == 0) {
2900 err = mnt_want_write_file(file);
2905 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2907 goto out_drop_write;
2908 dentry = lookup_one_len(vol_args->name, parent, namelen);
2909 if (IS_ERR(dentry)) {
2910 err = PTR_ERR(dentry);
2911 goto out_unlock_dir;
2914 if (d_really_is_negative(dentry)) {
2919 inode = d_inode(dentry);
2920 dest = BTRFS_I(inode)->root;
2921 if (!capable(CAP_SYS_ADMIN)) {
2923 * Regular user. Only allow this with a special mount
2924 * option, when the user has write+exec access to the
2925 * subvol root, and when rmdir(2) would have been
2928 		 * Note that this is _not_ a check that the subvol is
2929 * empty or doesn't contain data that we wouldn't
2930 * otherwise be able to delete.
2932 * Users who want to delete empty subvols should try
2936 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
2940 * Do not allow deletion if the parent dir is the same
2941 * as the dir to be deleted. That means the ioctl
2942 * must be called on the dentry referencing the root
2943 * of the subvol, not a random directory contained
2950 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2955 /* check if subvolume may be deleted by a user */
2956 err = btrfs_may_delete(dir, dentry, 1);
2960 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2966 err = btrfs_delete_subvolume(dir, dentry);
2967 inode_unlock(inode);
2976 mnt_drop_write_file(file);
2982 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2984 struct inode *inode = file_inode(file);
2985 struct btrfs_root *root = BTRFS_I(inode)->root;
2986 struct btrfs_ioctl_defrag_range_args *range;
2989 ret = mnt_want_write_file(file);
2993 if (btrfs_root_readonly(root)) {
2998 switch (inode->i_mode & S_IFMT) {
3000 if (!capable(CAP_SYS_ADMIN)) {
3004 ret = btrfs_defrag_root(root);
3008 * Note that this does not check the file descriptor for write
3009 * access. This prevents defragmenting executables that are
3010 * running and allows defrag on files open in read-only mode.
3012 if (!capable(CAP_SYS_ADMIN) &&
3013 inode_permission(inode, MAY_WRITE)) {
3018 range = kzalloc(sizeof(*range), GFP_KERNEL);
3025 if (copy_from_user(range, argp,
3031 /* compression requires us to start the IO */
3032 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
3033 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
3034 range->extent_thresh = (u32)-1;
3037 /* the rest are all set to zero by kzalloc */
3038 range->len = (u64)-1;
3040 ret = btrfs_defrag_file(file_inode(file), file,
3041 range, BTRFS_OLDEST_GENERATION, 0);
3050 mnt_drop_write_file(file);
3054 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
3056 struct btrfs_ioctl_vol_args *vol_args;
3059 if (!capable(CAP_SYS_ADMIN))
3062 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
3063 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3065 vol_args = memdup_user(arg, sizeof(*vol_args));
3066 if (IS_ERR(vol_args)) {
3067 ret = PTR_ERR(vol_args);
3071 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3072 ret = btrfs_init_new_device(fs_info, vol_args->name);
3075 btrfs_info(fs_info, "disk added %s", vol_args->name);
3079 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3083 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3085 struct inode *inode = file_inode(file);
3086 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3087 struct btrfs_ioctl_vol_args_v2 *vol_args;
3090 if (!capable(CAP_SYS_ADMIN))
3093 ret = mnt_want_write_file(file);
3097 vol_args = memdup_user(arg, sizeof(*vol_args));
3098 if (IS_ERR(vol_args)) {
3099 ret = PTR_ERR(vol_args);
3103 	/* Check for compatibility, reject unknown flags */
3104 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
3109 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3110 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3114 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3115 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
3117 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3118 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3120 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3123 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3124 btrfs_info(fs_info, "device deleted: id %llu",
3127 btrfs_info(fs_info, "device deleted: %s",
3133 mnt_drop_write_file(file);
3137 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3139 struct inode *inode = file_inode(file);
3140 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3141 struct btrfs_ioctl_vol_args *vol_args;
3144 if (!capable(CAP_SYS_ADMIN))
3147 ret = mnt_want_write_file(file);
3151 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3152 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3153 goto out_drop_write;
3156 vol_args = memdup_user(arg, sizeof(*vol_args));
3157 if (IS_ERR(vol_args)) {
3158 ret = PTR_ERR(vol_args);
3162 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3163 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3166 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3169 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3171 mnt_drop_write_file(file);
3176 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3179 struct btrfs_ioctl_fs_info_args *fi_args;
3180 struct btrfs_device *device;
3181 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3184 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
3189 fi_args->num_devices = fs_devices->num_devices;
3191 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3192 if (device->devid > fi_args->max_id)
3193 fi_args->max_id = device->devid;
3197 memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
3198 fi_args->nodesize = fs_info->nodesize;
3199 fi_args->sectorsize = fs_info->sectorsize;
3200 fi_args->clone_alignment = fs_info->sectorsize;
3202 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3209 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3212 struct btrfs_ioctl_dev_info_args *di_args;
3213 struct btrfs_device *dev;
3215 char *s_uuid = NULL;
3217 di_args = memdup_user(arg, sizeof(*di_args));
3218 if (IS_ERR(di_args))
3219 return PTR_ERR(di_args);
3221 if (!btrfs_is_empty_uuid(di_args->uuid))
3222 s_uuid = di_args->uuid;
3225 dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
3233 di_args->devid = dev->devid;
3234 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3235 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3236 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3238 strncpy(di_args->path, rcu_str_deref(dev->name),
3239 sizeof(di_args->path) - 1);
3240 di_args->path[sizeof(di_args->path) - 1] = 0;
3242 di_args->path[0] = '\0';
3247 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3254 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
3258 page = grab_cache_page(inode->i_mapping, index);
3260 return ERR_PTR(-ENOMEM);
3262 if (!PageUptodate(page)) {
3265 ret = btrfs_readpage(NULL, page);
3267 return ERR_PTR(ret);
3269 if (!PageUptodate(page)) {
3272 return ERR_PTR(-EIO);
3274 if (page->mapping != inode->i_mapping) {
3277 return ERR_PTR(-EAGAIN);
3284 static int gather_extent_pages(struct inode *inode, struct page **pages,
3285 int num_pages, u64 off)
3288 pgoff_t index = off >> PAGE_SHIFT;
3290 for (i = 0; i < num_pages; i++) {
3292 pages[i] = extent_same_get_page(inode, index + i);
3293 if (IS_ERR(pages[i])) {
3294 int err = PTR_ERR(pages[i]);
3305 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
3306 bool retry_range_locking)
3309 * Do any pending delalloc/csum calculations on inode, one way or
3310 * another, and lock file content.
3311 * The locking order is:
3314 * 2) range in the inode's io tree
3317 struct btrfs_ordered_extent *ordered;
3318 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3319 ordered = btrfs_lookup_first_ordered_extent(inode,
3322 ordered->file_offset + ordered->len <= off ||
3323 ordered->file_offset >= off + len) &&
3324 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
3325 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
3327 btrfs_put_ordered_extent(ordered);
3330 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3332 btrfs_put_ordered_extent(ordered);
3333 if (!retry_range_locking)
3335 btrfs_wait_ordered_range(inode, off, len);
3340 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
3342 inode_unlock(inode1);
3343 inode_unlock(inode2);
3346 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3348 if (inode1 < inode2)
3349 swap(inode1, inode2);
3351 inode_lock_nested(inode1, I_MUTEX_PARENT);
3352 inode_lock_nested(inode2, I_MUTEX_CHILD);
3355 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3356 struct inode *inode2, u64 loff2, u64 len)
3358 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3359 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3362 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3363 struct inode *inode2, u64 loff2, u64 len,
3364 bool retry_range_locking)
3368 if (inode1 < inode2) {
3369 swap(inode1, inode2);
3372 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
3375 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
3377 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
3384 struct page **src_pages;
3385 struct page **dst_pages;
3388 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
3393 for (i = 0; i < cmp->num_pages; i++) {
3394 pg = cmp->src_pages[i];
3398 cmp->src_pages[i] = NULL;
3400 pg = cmp->dst_pages[i];
3404 cmp->dst_pages[i] = NULL;
3409 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3410 struct inode *dst, u64 dst_loff,
3411 u64 len, struct cmp_pages *cmp)
3414 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3416 cmp->num_pages = num_pages;
3418 ret = gather_extent_pages(src, cmp->src_pages, num_pages, loff);
3422 ret = gather_extent_pages(dst, cmp->dst_pages, num_pages, dst_loff);
3426 btrfs_cmp_data_free(cmp);
3430 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
3434 struct page *src_page, *dst_page;
3435 unsigned int cmp_len = PAGE_SIZE;
3436 void *addr, *dst_addr;
3440 if (len < PAGE_SIZE)
3443 BUG_ON(i >= cmp->num_pages);
3445 src_page = cmp->src_pages[i];
3446 dst_page = cmp->dst_pages[i];
3447 ASSERT(PageLocked(src_page));
3448 ASSERT(PageLocked(dst_page));
3450 addr = kmap_atomic(src_page);
3451 dst_addr = kmap_atomic(dst_page);
3453 flush_dcache_page(src_page);
3454 flush_dcache_page(dst_page);
3456 if (memcmp(addr, dst_addr, cmp_len))
3459 kunmap_atomic(addr);
3460 kunmap_atomic(dst_addr);
3472 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3476 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3478 if (off + olen > inode->i_size || off + olen < off)
3481 /* if we extend to eof, continue to block boundary */
3482 if (off + len == inode->i_size)
3483 *plen = len = ALIGN(inode->i_size, bs) - off;
3485 /* Check that we are block aligned - btrfs_clone() requires this */
3486 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3492 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3493 struct inode *dst, u64 dst_loff,
3494 struct cmp_pages *cmp)
3498 bool same_inode = (src == dst);
3499 u64 same_lock_start = 0;
3500 u64 same_lock_len = 0;
3502 ret = extent_same_check_offsets(src, loff, &len, olen);
3506 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3512 * Single inode case wants the same checks, except we
3513 * don't want our length pushed out past i_size as
3514 * comparing that data range makes no sense.
3516 * extent_same_check_offsets() will do this for an
3517 * unaligned length at i_size, so catch it here and
3518 * reject the request.
3520 * This effectively means we require aligned extents
3521 * for the single-inode case, whereas the other cases
3522 * allow an unaligned length so long as it ends at
3528 /* Check for overlapping ranges */
3529 if (dst_loff + len > loff && dst_loff < loff + len)
3532 same_lock_start = min_t(u64, loff, dst_loff);
3533 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
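		/*
		 * Worked example, not in the original source: with loff == 0,
		 * dst_loff == 64K and len == 16K, the two ranges collapse into
		 * a single lock of [0, 80K), i.e. same_lock_start == 0 and
		 * same_lock_len == 80K.
		 */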
3536 * If the source and destination inodes are different, the
3537 * source's range end offset matches the source's i_size, that
3538 * i_size is not a multiple of the sector size, and the
3539 * destination range does not go past the destination's i_size,
3540 * we must round down the length to the nearest sector size
3541 	 * multiple. If we don't do this adjustment we end up replacing
3542 * with zeroes the bytes in the range that starts at the
3543 * deduplication range's end offset and ends at the next sector
3546 if (loff + olen == i_size_read(src) &&
3547 dst_loff + len < i_size_read(dst)) {
3548 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3550 len = round_down(i_size_read(src), sz) - loff;
3558 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, cmp);
3563 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3566 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3569 * If one of the inodes has dirty pages in the respective range or
3570 	 * ordered extents, we need to flush delalloc and wait for all ordered
3571 * extents in the range. We must unlock the pages and the ranges in the
3572 * io trees to avoid deadlocks when flushing delalloc (requires locking
3573 * pages) and when waiting for ordered extents to complete (they require
3576 if (ret == -EAGAIN) {
3578 * Ranges in the io trees already unlocked. Now unlock all
3579 * pages before waiting for all IO to complete.
3581 btrfs_cmp_data_free(cmp);
3583 btrfs_wait_ordered_range(src, same_lock_start,
3586 btrfs_wait_ordered_range(src, loff, len);
3587 btrfs_wait_ordered_range(dst, dst_loff, len);
3593 /* ranges in the io trees already unlocked */
3594 btrfs_cmp_data_free(cmp);
3598 /* pass original length for comparison so we stay within i_size */
3599 ret = btrfs_cmp_data(olen, cmp);
3601 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3604 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3605 same_lock_start + same_lock_len - 1);
3607 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3609 btrfs_cmp_data_free(cmp);
3614 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3616 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3617 struct inode *dst, u64 dst_loff)
3620 struct cmp_pages cmp;
3621 int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
3622 bool same_inode = (src == dst);
3623 u64 i, tail_len, chunk_count;
3631 btrfs_double_inode_lock(src, dst);
3633 /* don't make the dst file partly checksummed */
3634 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3635 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3640 tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
3641 chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
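	/*
	 * Worked example, not in the original source: olen == 40M with
	 * BTRFS_MAX_DEDUPE_LEN == 16M gives chunk_count == 2 and
	 * tail_len == 8M, i.e. two full 16M passes plus one 8M tail pass
	 * through btrfs_extent_same_range().
	 */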
3642 if (chunk_count == 0)
3643 num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;
3646 * If deduping ranges in the same inode, locking rules make it
3647 * mandatory to always lock pages in ascending order to avoid deadlocks
3648 * with concurrent tasks (such as starting writeback/delalloc).
3650 if (same_inode && dst_loff < loff)
3651 swap(loff, dst_loff);
3654 * We must gather up all the pages before we initiate our extent
3655 * locking. We use an array for the page pointers. Size of the array is
3656 * bounded by len, which is in turn bounded by BTRFS_MAX_DEDUPE_LEN.
3658 cmp.src_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3659 GFP_KERNEL | __GFP_ZERO);
3660 cmp.dst_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3661 GFP_KERNEL | __GFP_ZERO);
3662 if (!cmp.src_pages || !cmp.dst_pages) {
3667 for (i = 0; i < chunk_count; i++) {
3668 ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
3669 dst, dst_loff, &cmp);
3673 loff += BTRFS_MAX_DEDUPE_LEN;
3674 dst_loff += BTRFS_MAX_DEDUPE_LEN;
3678 ret = btrfs_extent_same_range(src, loff, tail_len, dst,
3682 kvfree(cmp.src_pages);
3683 kvfree(cmp.dst_pages);
3689 btrfs_double_inode_unlock(src, dst);
3694 int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
3695 struct file *dst_file, loff_t dst_loff,
3698 struct inode *src = file_inode(src_file);
3699 struct inode *dst = file_inode(dst_file);
3700 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3702 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3704 * Btrfs does not support blocksize < page_size. As a
3705 * result, btrfs_cmp_data() won't correctly handle
3706 * this situation without an update.
3711 return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
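/*
 * Not part of the original source: this hook is reached through the generic
 * VFS dedupe path.  A hedged userspace sketch, assuming the standard
 * <linux/fs.h> FIDEDUPERANGE interface and hypothetical src_fd/dst_fd:
 *
 *	struct file_dedupe_range *range;
 *
 *	range = calloc(1, sizeof(*range) + sizeof(range->info[0]));
 *	range->src_offset = 0;
 *	range->src_length = 128 * 1024;
 *	range->dest_count = 1;
 *	range->info[0].dest_fd = dst_fd;
 *	range->info[0].dest_offset = 0;
 *	ioctl(src_fd, FIDEDUPERANGE, range);
 */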
3714 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3715 struct inode *inode,
3721 struct btrfs_root *root = BTRFS_I(inode)->root;
3724 inode_inc_iversion(inode);
3725 if (!no_time_update)
3726 inode->i_mtime = inode->i_ctime = current_time(inode);
3728 * We round up to the block size at eof when determining which
3729 * extents to clone above, but shouldn't round up the file size.
3731 if (endoff > destoff + olen)
3732 endoff = destoff + olen;
3733 if (endoff > inode->i_size)
3734 btrfs_i_size_write(BTRFS_I(inode), endoff);
3736 ret = btrfs_update_inode(trans, root, inode);
3738 btrfs_abort_transaction(trans, ret);
3739 btrfs_end_transaction(trans);
3742 ret = btrfs_end_transaction(trans);
3747 static void clone_update_extent_map(struct btrfs_inode *inode,
3748 const struct btrfs_trans_handle *trans,
3749 const struct btrfs_path *path,
3750 const u64 hole_offset,
3753 struct extent_map_tree *em_tree = &inode->extent_tree;
3754 struct extent_map *em;
3757 em = alloc_extent_map();
3759 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3764 struct btrfs_file_extent_item *fi;
3766 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3767 struct btrfs_file_extent_item);
3768 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3769 em->generation = -1;
3770 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3771 BTRFS_FILE_EXTENT_INLINE)
3772 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3773 &inode->runtime_flags);
3775 em->start = hole_offset;
3777 em->ram_bytes = em->len;
3778 em->orig_start = hole_offset;
3779 em->block_start = EXTENT_MAP_HOLE;
3781 em->orig_block_len = 0;
3782 em->compress_type = BTRFS_COMPRESS_NONE;
3783 em->generation = trans->transid;
3787 write_lock(&em_tree->lock);
3788 ret = add_extent_mapping(em_tree, em, 1);
3789 write_unlock(&em_tree->lock);
3790 if (ret != -EEXIST) {
3791 free_extent_map(em);
3794 btrfs_drop_extent_cache(inode, em->start,
3795 em->start + em->len - 1, 0);
3799 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3803  * Make sure we do not end up inserting an inline extent into a file that
3804  * already has other (non-inline) extents. If a file has an inline extent it can
3805 * not have any other extents and the (single) inline extent must start at the
3806 * file offset 0. Failing to respect these rules will lead to file corruption,
3807 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
3809 * We can have extents that have been already written to disk or we can have
3810 * dirty ranges still in delalloc, in which case the extent maps and items are
3811 * created only when we run delalloc, and the delalloc ranges might fall outside
3812 * the range we are currently locking in the inode's io tree. So we check the
3813 * inode's i_size because of that (i_size updates are done while holding the
3814 * i_mutex, which we are holding here).
3815 * We also check to see if the inode has a size not greater than "datal" but has
3816  * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3817 * protected against such concurrent fallocate calls by the i_mutex).
3819 * If the file has no extents but a size greater than datal, do not allow the
3820  * copy because we would need to turn the inline extent into a non-inline one (even
3821 * with NO_HOLES enabled). If we find our destination inode only has one inline
3822 * extent, just overwrite it with the source inline extent if its size is less
3823 * than the source extent's size, or we could copy the source inline extent's
3824  * data into the destination inode's inline extent if the latter is greater than
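/*
 * A concrete illustration of the rules above (not in the original source,
 * sizes are made up): on a 4K-sector filesystem, cloning a 1K inline extent
 * into offset 0 of an empty destination file is allowed, while cloning it
 * into a non-zero offset, or into a destination that already has non-inline
 * extents, is refused so the inline-extent layout rules are never violated.
 */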
3827 static int clone_copy_inline_extent(struct inode *dst,
3828 struct btrfs_trans_handle *trans,
3829 struct btrfs_path *path,
3830 struct btrfs_key *new_key,
3831 const u64 drop_start,
3837 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3838 struct btrfs_root *root = BTRFS_I(dst)->root;
3839 const u64 aligned_end = ALIGN(new_key->offset + datal,
3840 fs_info->sectorsize);
3842 struct btrfs_key key;
3844 if (new_key->offset > 0)
3847 key.objectid = btrfs_ino(BTRFS_I(dst));
3848 key.type = BTRFS_EXTENT_DATA_KEY;
3850 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3853 } else if (ret > 0) {
3854 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3855 ret = btrfs_next_leaf(root, path);
3859 goto copy_inline_extent;
3861 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3862 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3863 key.type == BTRFS_EXTENT_DATA_KEY) {
3864 ASSERT(key.offset > 0);
3867 } else if (i_size_read(dst) <= datal) {
3868 struct btrfs_file_extent_item *ei;
3872 * If the file size is <= datal, make sure there are no other
3873 		 * extents following (this can happen due to a fallocate call with
3874 * the flag FALLOC_FL_KEEP_SIZE).
3876 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3877 struct btrfs_file_extent_item);
3879 * If it's an inline extent, it can not have other extents
3882 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3883 BTRFS_FILE_EXTENT_INLINE)
3884 goto copy_inline_extent;
3886 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3887 if (ext_len > aligned_end)
3890 ret = btrfs_next_item(root, path);
3893 } else if (ret == 0) {
3894 btrfs_item_key_to_cpu(path->nodes[0], &key,
3896 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3897 key.type == BTRFS_EXTENT_DATA_KEY)
3904 * We have no extent items, or we have an extent at offset 0 which may
3905 	 * or may not be inlined. All these cases are dealt with the same way.
3907 if (i_size_read(dst) > datal) {
3909 * If the destination inode has an inline extent...
3910 * This would require copying the data from the source inline
3911 * extent into the beginning of the destination's inline extent.
3912 * But this is really complex, both extents can be compressed
3913 * or just one of them, which would require decompressing and
3914 * re-compressing data (which could increase the new compressed
3915 * size, not allowing the compressed data to fit anymore in an
3917 * So just don't support this case for now (it should be rare,
3918 * we are not really saving space when cloning inline extents).
3923 btrfs_release_path(path);
3924 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3927 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3932 const u32 start = btrfs_file_extent_calc_inline_size(0);
3934 memmove(inline_data + start, inline_data + start + skip, datal);
3937 write_extent_buffer(path->nodes[0], inline_data,
3938 btrfs_item_ptr_offset(path->nodes[0],
3941 inode_add_bytes(dst, datal);
3947 * btrfs_clone() - clone a range from inode file to another
3949 * @src: Inode to clone from
3950 * @inode: Inode to clone to
3951 * @off: Offset within source to start clone from
3952 * @olen: Original length, passed by user, of range to clone
3953 * @olen_aligned: Block-aligned value of olen
3954 * @destoff: Offset within @inode to start clone
3955 * @no_time_update: Whether to update mtime/ctime on the target inode
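/*
 * Worked example, not in the original source: with a 4K sector size,
 * cloning olen == 10000 bytes from off == 0 of a 10000-byte source into
 * destoff == 4096 uses olen_aligned == 12288 for the extent work, while
 * clone_finish_inode_update() later caps the destination i_size at
 * destoff + olen == 14096, so no rounded-up tail becomes visible.
 */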
3957 static int btrfs_clone(struct inode *src, struct inode *inode,
3958 const u64 off, const u64 olen, const u64 olen_aligned,
3959 const u64 destoff, int no_time_update)
3961 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3962 struct btrfs_root *root = BTRFS_I(inode)->root;
3963 struct btrfs_path *path = NULL;
3964 struct extent_buffer *leaf;
3965 struct btrfs_trans_handle *trans;
3967 struct btrfs_key key;
3971 const u64 len = olen_aligned;
3972 u64 last_dest_end = destoff;
3975 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3979 path = btrfs_alloc_path();
3985 path->reada = READA_FORWARD;
3987 key.objectid = btrfs_ino(BTRFS_I(src));
3988 key.type = BTRFS_EXTENT_DATA_KEY;
3992 u64 next_key_min_offset = key.offset + 1;
3995 * note the key will change type as we walk through the
3998 path->leave_spinning = 1;
3999 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
4004 * First search, if no extent item that starts at offset off was
4005 		 * found but the previous item is an extent item, it might
4006 		 * overlap our target range, therefore process it.
4008 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
4009 btrfs_item_key_to_cpu(path->nodes[0], &key,
4010 path->slots[0] - 1);
4011 if (key.type == BTRFS_EXTENT_DATA_KEY)
4015 nritems = btrfs_header_nritems(path->nodes[0]);
4017 if (path->slots[0] >= nritems) {
4018 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
4023 nritems = btrfs_header_nritems(path->nodes[0]);
4025 leaf = path->nodes[0];
4026 slot = path->slots[0];
4028 btrfs_item_key_to_cpu(leaf, &key, slot);
4029 if (key.type > BTRFS_EXTENT_DATA_KEY ||
4030 key.objectid != btrfs_ino(BTRFS_I(src)))
4033 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4034 struct btrfs_file_extent_item *extent;
4037 struct btrfs_key new_key;
4038 u64 disko = 0, diskl = 0;
4039 u64 datao = 0, datal = 0;
4043 extent = btrfs_item_ptr(leaf, slot,
4044 struct btrfs_file_extent_item);
4045 comp = btrfs_file_extent_compression(leaf, extent);
4046 type = btrfs_file_extent_type(leaf, extent);
4047 if (type == BTRFS_FILE_EXTENT_REG ||
4048 type == BTRFS_FILE_EXTENT_PREALLOC) {
4049 disko = btrfs_file_extent_disk_bytenr(leaf,
4051 diskl = btrfs_file_extent_disk_num_bytes(leaf,
4053 datao = btrfs_file_extent_offset(leaf, extent);
4054 datal = btrfs_file_extent_num_bytes(leaf,
4056 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
4057 /* take upper bound, may be compressed */
4058 datal = btrfs_file_extent_ram_bytes(leaf,
4063 * The first search might have left us at an extent
4064 			 * item that ends before our target range's start; this can
4065 * happen if we have holes and NO_HOLES feature enabled.
4067 if (key.offset + datal <= off) {
4070 } else if (key.offset >= off + len) {
4073 next_key_min_offset = key.offset + datal;
4074 size = btrfs_item_size_nr(leaf, slot);
4075 read_extent_buffer(leaf, buf,
4076 btrfs_item_ptr_offset(leaf, slot),
4079 btrfs_release_path(path);
4080 path->leave_spinning = 0;
4082 memcpy(&new_key, &key, sizeof(new_key));
4083 new_key.objectid = btrfs_ino(BTRFS_I(inode));
4084 if (off <= key.offset)
4085 new_key.offset = key.offset + destoff - off;
4087 new_key.offset = destoff;
4090 * Deal with a hole that doesn't have an extent item
4091 * that represents it (NO_HOLES feature enabled).
4092 * This hole is either in the middle of the cloning
4093 * range or at the beginning (fully overlaps it or
4094 * partially overlaps it).
4096 if (new_key.offset != last_dest_end)
4097 drop_start = last_dest_end;
4099 drop_start = new_key.offset;
4102 * 1 - adjusting old extent (we may have to split it)
4103 * 1 - add new extent
4106 trans = btrfs_start_transaction(root, 3);
4107 if (IS_ERR(trans)) {
4108 ret = PTR_ERR(trans);
4112 if (type == BTRFS_FILE_EXTENT_REG ||
4113 type == BTRFS_FILE_EXTENT_PREALLOC) {
4115 * a | --- range to clone ---| b
4116 * | ------------- extent ------------- |
4119 /* subtract range b */
4120 if (key.offset + datal > off + len)
4121 datal = off + len - key.offset;
4123 /* subtract range a */
4124 if (off > key.offset) {
4125 datao += off - key.offset;
4126 datal -= off - key.offset;
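				/*
				 * Worked example, not in the original source:
				 * an extent item at key.offset == 0 with
				 * datal == 64K, cloned with off == 16K and
				 * len == 32K, is first trimmed on the right to
				 * datal == 48K (subtract range b) and then on
				 * the left to datao += 16K, datal == 32K
				 * (subtract range a), leaving exactly the
				 * requested 32K window.
				 */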
4129 ret = btrfs_drop_extents(trans, root, inode,
4131 new_key.offset + datal,
4134 if (ret != -EOPNOTSUPP)
4135 btrfs_abort_transaction(trans,
4137 btrfs_end_transaction(trans);
4141 ret = btrfs_insert_empty_item(trans, root, path,
4144 btrfs_abort_transaction(trans, ret);
4145 btrfs_end_transaction(trans);
4149 leaf = path->nodes[0];
4150 slot = path->slots[0];
4151 write_extent_buffer(leaf, buf,
4152 btrfs_item_ptr_offset(leaf, slot),
4155 extent = btrfs_item_ptr(leaf, slot,
4156 struct btrfs_file_extent_item);
4158 /* disko == 0 means it's a hole */
4162 btrfs_set_file_extent_offset(leaf, extent,
4164 btrfs_set_file_extent_num_bytes(leaf, extent,
4168 inode_add_bytes(inode, datal);
4169 ret = btrfs_inc_extent_ref(trans,
4172 root->root_key.objectid,
4173 btrfs_ino(BTRFS_I(inode)),
4174 new_key.offset - datao);
4176 btrfs_abort_transaction(trans,
4178 btrfs_end_transaction(trans);
4183 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
4187 if (off > key.offset) {
4188 skip = off - key.offset;
4189 new_key.offset += skip;
4192 if (key.offset + datal > off + len)
4193 trim = key.offset + datal - (off + len);
4195 if (comp && (skip || trim)) {
4197 btrfs_end_transaction(trans);
4200 size -= skip + trim;
4201 datal -= skip + trim;
4203 ret = clone_copy_inline_extent(inode,
4210 if (ret != -EOPNOTSUPP)
4211 btrfs_abort_transaction(trans,
4213 btrfs_end_transaction(trans);
4216 leaf = path->nodes[0];
4217 slot = path->slots[0];
4220 /* If we have an implicit hole (NO_HOLES feature). */
4221 if (drop_start < new_key.offset)
4222 clone_update_extent_map(BTRFS_I(inode), trans,
4224 new_key.offset - drop_start);
4226 clone_update_extent_map(BTRFS_I(inode), trans,
4229 btrfs_mark_buffer_dirty(leaf);
4230 btrfs_release_path(path);
4232 last_dest_end = ALIGN(new_key.offset + datal,
4233 fs_info->sectorsize);
4234 ret = clone_finish_inode_update(trans, inode,
4240 if (new_key.offset + datal >= destoff + len)
4243 btrfs_release_path(path);
4244 key.offset = next_key_min_offset;
4246 if (fatal_signal_pending(current)) {
4255 if (last_dest_end < destoff + len) {
4257 * We have an implicit hole (NO_HOLES feature is enabled) that
4258 * fully or partially overlaps our cloning range at its end.
4260 btrfs_release_path(path);
4263 * 1 - remove extent(s)
4266 trans = btrfs_start_transaction(root, 2);
4267 if (IS_ERR(trans)) {
4268 ret = PTR_ERR(trans);
4271 ret = btrfs_drop_extents(trans, root, inode,
4272 last_dest_end, destoff + len, 1);
4274 if (ret != -EOPNOTSUPP)
4275 btrfs_abort_transaction(trans, ret);
4276 btrfs_end_transaction(trans);
4279 clone_update_extent_map(BTRFS_I(inode), trans, NULL,
4281 destoff + len - last_dest_end);
4282 ret = clone_finish_inode_update(trans, inode, destoff + len,
4283 destoff, olen, no_time_update);
4287 btrfs_free_path(path);
4292 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
4293 u64 off, u64 olen, u64 destoff)
4295 struct inode *inode = file_inode(file);
4296 struct inode *src = file_inode(file_src);
4297 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4298 struct btrfs_root *root = BTRFS_I(inode)->root;
4301 u64 bs = fs_info->sb->s_blocksize;
4302 int same_inode = src == inode;
4306 	 * - split compressed inline extents. Annoying: we need to
4307 * decompress into destination's address_space (the file offset
4308 * may change, so source mapping won't do), then recompress (or
4309 * otherwise reinsert) a subrange.
4311 * - split destination inode's inline extents. The inline extents can
4312 * be either compressed or non-compressed.
4315 if (btrfs_root_readonly(root))
4318 if (file_src->f_path.mnt != file->f_path.mnt ||
4319 src->i_sb != inode->i_sb)
4322 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
4326 btrfs_double_inode_lock(src, inode);
4331 /* don't make the dst file partly checksummed */
4332 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
4333 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
4338 /* determine range to clone */
4340 if (off + len > src->i_size || off + len < off)
4343 olen = len = src->i_size - off;
4345 * If we extend to eof, continue to block boundary if and only if the
4346 * destination end offset matches the destination file's size, otherwise
4347 * we would be corrupting data by placing the eof block into the middle
4350 if (off + len == src->i_size) {
4351 if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
4353 len = ALIGN(src->i_size, bs) - off;
4361 /* verify the end result is block aligned */
4362 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
4363 !IS_ALIGNED(destoff, bs))
4366 	/* verify whether the ranges overlap within the same file */
4368 if (destoff + len > off && destoff < off + len)
4372 if (destoff > inode->i_size) {
4373 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
4379 * Lock the target range too. Right after we replace the file extent
4380 * items in the fs tree (which now point to the cloned data), we might
4381 * have a worker replace them with extent items relative to a write
4382 	 * operation that was issued before this clone operation (i.e. it can race
4383 * with inode.c:btrfs_finish_ordered_io).
4386 u64 lock_start = min_t(u64, off, destoff);
4387 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
4389 ret = lock_extent_range(src, lock_start, lock_len, true);
4391 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
4396 /* ranges in the io trees already unlocked */
4400 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
4403 u64 lock_start = min_t(u64, off, destoff);
4404 u64 lock_end = max_t(u64, off, destoff) + len - 1;
4406 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
4408 btrfs_double_extent_unlock(src, off, inode, destoff, len);
4411 * Truncate page cache pages so that future reads will see the cloned
4412 * data immediately and not the previous data.
4414 truncate_inode_pages_range(&inode->i_data,
4415 round_down(destoff, PAGE_SIZE),
4416 round_up(destoff + len, PAGE_SIZE) - 1);
4419 btrfs_double_inode_unlock(src, inode);
4425 int btrfs_clone_file_range(struct file *src_file, loff_t off,
4426 struct file *dst_file, loff_t destoff, u64 len)
4428 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
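/*
 * Not part of the original source: a hedged userspace sketch of reaching
 * btrfs_clone_files() through the generic clone-range interface, assuming
 * the standard <linux/fs.h> FICLONERANGE ioctl and hypothetical fds:
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 1024 * 1024,
 *		.dest_offset = 0,
 *	};
 *
 *	ioctl(dst_fd, FICLONERANGE, &fcr);
 */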
4431 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4433 struct inode *inode = file_inode(file);
4434 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4435 struct btrfs_root *root = BTRFS_I(inode)->root;
4436 struct btrfs_root *new_root;
4437 struct btrfs_dir_item *di;
4438 struct btrfs_trans_handle *trans;
4439 struct btrfs_path *path;
4440 struct btrfs_key location;
4441 struct btrfs_disk_key disk_key;
4446 if (!capable(CAP_SYS_ADMIN))
4449 ret = mnt_want_write_file(file);
4453 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4459 objectid = BTRFS_FS_TREE_OBJECTID;
4461 location.objectid = objectid;
4462 location.type = BTRFS_ROOT_ITEM_KEY;
4463 location.offset = (u64)-1;
4465 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4466 if (IS_ERR(new_root)) {
4467 ret = PTR_ERR(new_root);
4470 if (!is_fstree(new_root->objectid)) {
4475 path = btrfs_alloc_path();
4480 path->leave_spinning = 1;
4482 trans = btrfs_start_transaction(root, 1);
4483 if (IS_ERR(trans)) {
4484 btrfs_free_path(path);
4485 ret = PTR_ERR(trans);
4489 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4490 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4491 dir_id, "default", 7, 1);
4492 if (IS_ERR_OR_NULL(di)) {
4493 btrfs_free_path(path);
4494 btrfs_end_transaction(trans);
4496 "Umm, you don't have the default diritem, this isn't going to work");
4501 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4502 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4503 btrfs_mark_buffer_dirty(path->nodes[0]);
4504 btrfs_free_path(path);
4506 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4507 btrfs_end_transaction(trans);
4509 mnt_drop_write_file(file);
4513 static void get_block_group_info(struct list_head *groups_list,
4514 struct btrfs_ioctl_space_info *space)
4516 struct btrfs_block_group_cache *block_group;
4518 space->total_bytes = 0;
4519 space->used_bytes = 0;
4521 list_for_each_entry(block_group, groups_list, list) {
4522 space->flags = block_group->flags;
4523 space->total_bytes += block_group->key.offset;
4524 space->used_bytes +=
4525 btrfs_block_group_used(&block_group->item);
4529 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4532 struct btrfs_ioctl_space_args space_args;
4533 struct btrfs_ioctl_space_info space;
4534 struct btrfs_ioctl_space_info *dest;
4535 struct btrfs_ioctl_space_info *dest_orig;
4536 struct btrfs_ioctl_space_info __user *user_dest;
4537 struct btrfs_space_info *info;
4538 static const u64 types[] = {
4539 BTRFS_BLOCK_GROUP_DATA,
4540 BTRFS_BLOCK_GROUP_SYSTEM,
4541 BTRFS_BLOCK_GROUP_METADATA,
4542 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
4550 if (copy_from_user(&space_args,
4551 (struct btrfs_ioctl_space_args __user *)arg,
4552 sizeof(space_args)))
4555 for (i = 0; i < num_types; i++) {
4556 struct btrfs_space_info *tmp;
4560 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4562 if (tmp->flags == types[i]) {
4572 down_read(&info->groups_sem);
4573 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4574 if (!list_empty(&info->block_groups[c]))
4577 up_read(&info->groups_sem);
4581 * Global block reserve, exported as a space_info
4585 /* space_slots == 0 means they are asking for a count */
4586 if (space_args.space_slots == 0) {
4587 space_args.total_spaces = slot_count;
4591 slot_count = min_t(u64, space_args.space_slots, slot_count);
4593 alloc_size = sizeof(*dest) * slot_count;
4595 /* we generally have at most 6 or so space infos, one for each raid
4596 * level. So, a whole page should be more than enough for everyone
4598 if (alloc_size > PAGE_SIZE)
4601 space_args.total_spaces = 0;
4602 dest = kmalloc(alloc_size, GFP_KERNEL);
4607 /* now we have a buffer to copy into */
4608 for (i = 0; i < num_types; i++) {
4609 struct btrfs_space_info *tmp;
4616 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4618 if (tmp->flags == types[i]) {
4627 down_read(&info->groups_sem);
4628 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4629 if (!list_empty(&info->block_groups[c])) {
4630 get_block_group_info(&info->block_groups[c],
4632 memcpy(dest, &space, sizeof(space));
4634 space_args.total_spaces++;
4640 up_read(&info->groups_sem);
4644 * Add global block reserve
4647 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4649 spin_lock(&block_rsv->lock);
4650 space.total_bytes = block_rsv->size;
4651 space.used_bytes = block_rsv->size - block_rsv->reserved;
4652 spin_unlock(&block_rsv->lock);
4653 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4654 memcpy(dest, &space, sizeof(space));
4655 space_args.total_spaces++;
4658 user_dest = (struct btrfs_ioctl_space_info __user *)
4659 (arg + sizeof(struct btrfs_ioctl_space_args));
4661 if (copy_to_user(user_dest, dest_orig, alloc_size))
4666 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
4672 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4675 struct btrfs_trans_handle *trans;
4679 trans = btrfs_attach_transaction_barrier(root);
4680 if (IS_ERR(trans)) {
4681 if (PTR_ERR(trans) != -ENOENT)
4682 return PTR_ERR(trans);
4684 /* No running transaction, don't bother */
4685 transid = root->fs_info->last_trans_committed;
4688 transid = trans->transid;
4689 ret = btrfs_commit_transaction_async(trans, 0);
4691 btrfs_end_transaction(trans);
4696 if (copy_to_user(argp, &transid, sizeof(transid)))
4701 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4707 if (copy_from_user(&transid, argp, sizeof(transid)))
4710 transid = 0; /* current trans */
4712 return btrfs_wait_for_commit(fs_info, transid);
4715 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4717 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4718 struct btrfs_ioctl_scrub_args *sa;
4721 if (!capable(CAP_SYS_ADMIN))
4724 sa = memdup_user(arg, sizeof(*sa));
4728 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4729 ret = mnt_want_write_file(file);
4734 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4735 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4738 if (copy_to_user(arg, sa, sizeof(*sa)))
4741 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4742 mnt_drop_write_file(file);
4748 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4750 if (!capable(CAP_SYS_ADMIN))
4753 return btrfs_scrub_cancel(fs_info);
4756 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4759 struct btrfs_ioctl_scrub_args *sa;
4762 if (!capable(CAP_SYS_ADMIN))
4765 sa = memdup_user(arg, sizeof(*sa));
4769 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4771 if (copy_to_user(arg, sa, sizeof(*sa)))
4778 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4781 struct btrfs_ioctl_get_dev_stats *sa;
4784 sa = memdup_user(arg, sizeof(*sa));
4788 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4793 ret = btrfs_get_dev_stats(fs_info, sa);
4795 if (copy_to_user(arg, sa, sizeof(*sa)))
4802 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4805 struct btrfs_ioctl_dev_replace_args *p;
4808 if (!capable(CAP_SYS_ADMIN))
4811 p = memdup_user(arg, sizeof(*p));
4816 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4817 if (sb_rdonly(fs_info->sb)) {
4821 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4822 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4824 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4825 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4828 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4829 btrfs_dev_replace_status(fs_info, p);
4832 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4833 p->result = btrfs_dev_replace_cancel(fs_info);
4841 if (copy_to_user(arg, p, sizeof(*p)))
4848 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4854 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4855 struct inode_fs_paths *ipath = NULL;
4856 struct btrfs_path *path;
4858 if (!capable(CAP_DAC_READ_SEARCH))
4861 path = btrfs_alloc_path();
4867 ipa = memdup_user(arg, sizeof(*ipa));
4874 size = min_t(u32, ipa->size, 4096);
4875 ipath = init_ipath(size, root, path);
4876 if (IS_ERR(ipath)) {
4877 ret = PTR_ERR(ipath);
4882 ret = paths_from_inode(ipa->inum, ipath);
4886 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4887 rel_ptr = ipath->fspath->val[i] -
4888 (u64)(unsigned long)ipath->fspath->val;
4889 ipath->fspath->val[i] = rel_ptr;
4892 btrfs_free_path(path);
4894 ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
4895 ipath->fspath, size);
4902 btrfs_free_path(path);
4909 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4911 struct btrfs_data_container *inodes = ctx;
4912 const size_t c = 3 * sizeof(u64);
4914 if (inodes->bytes_left >= c) {
4915 inodes->bytes_left -= c;
4916 inodes->val[inodes->elem_cnt] = inum;
4917 inodes->val[inodes->elem_cnt + 1] = offset;
4918 inodes->val[inodes->elem_cnt + 2] = root;
4919 inodes->elem_cnt += 3;
4921 inodes->bytes_missing += c - inodes->bytes_left;
4922 inodes->bytes_left = 0;
4923 inodes->elem_missed += 3;
4929 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4930 void __user *arg, int version)
4934 struct btrfs_ioctl_logical_ino_args *loi;
4935 struct btrfs_data_container *inodes = NULL;
4936 struct btrfs_path *path = NULL;
4939 if (!capable(CAP_SYS_ADMIN))
4942 loi = memdup_user(arg, sizeof(*loi));
4944 return PTR_ERR(loi);
4947 ignore_offset = false;
4948 size = min_t(u32, loi->size, SZ_64K);
4950 /* All reserved bits must be 0 for now */
4951 if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4955 /* Only accept flags we have defined so far */
4956 if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4960 ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4961 size = min_t(u32, loi->size, SZ_16M);
4964 inodes = init_data_container(size);
4965 if (IS_ERR(inodes)) {
4966 ret = PTR_ERR(inodes);
4970 path = btrfs_alloc_path();
4975 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4976 build_ino_list, inodes, ignore_offset);
4977 btrfs_free_path(path);
4983 ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
4996 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4997 struct btrfs_ioctl_balance_args *bargs)
4999 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
5001 bargs->flags = bctl->flags;
5003 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
5004 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
5005 if (atomic_read(&fs_info->balance_pause_req))
5006 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
5007 if (atomic_read(&fs_info->balance_cancel_req))
5008 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
5010 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
5011 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
5012 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
5014 spin_lock(&fs_info->balance_lock);
5015 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
5016 spin_unlock(&fs_info->balance_lock);
5019 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
5021 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5022 struct btrfs_fs_info *fs_info = root->fs_info;
5023 struct btrfs_ioctl_balance_args *bargs;
5024 struct btrfs_balance_control *bctl;
5025 bool need_unlock; /* for mut. excl. ops lock */
5028 if (!capable(CAP_SYS_ADMIN))
5031 ret = mnt_want_write_file(file);
5036 if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
5037 mutex_lock(&fs_info->balance_mutex);
5043 * mut. excl. ops lock is locked. Three possibilities:
5044 * (1) some other op is running
5045 * (2) balance is running
5046 * (3) balance is paused -- special case (think resume)
5048 mutex_lock(&fs_info->balance_mutex);
5049 if (fs_info->balance_ctl) {
5050 /* this is either (2) or (3) */
5051 if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
5052 mutex_unlock(&fs_info->balance_mutex);
5054 * Lock released to allow other waiters to continue,
5055 				 * we'll reexamine the status.
5057 mutex_lock(&fs_info->balance_mutex);
5059 if (fs_info->balance_ctl &&
5060 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
5062 need_unlock = false;
5066 mutex_unlock(&fs_info->balance_mutex);
5070 mutex_unlock(&fs_info->balance_mutex);
5076 mutex_unlock(&fs_info->balance_mutex);
5077 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
5082 BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
5085 bargs = memdup_user(arg, sizeof(*bargs));
5086 if (IS_ERR(bargs)) {
5087 ret = PTR_ERR(bargs);
5091 if (bargs->flags & BTRFS_BALANCE_RESUME) {
5092 if (!fs_info->balance_ctl) {
5097 bctl = fs_info->balance_ctl;
5098 spin_lock(&fs_info->balance_lock);
5099 bctl->flags |= BTRFS_BALANCE_RESUME;
5100 spin_unlock(&fs_info->balance_lock);
5108 if (fs_info->balance_ctl) {
5113 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
5120 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
5121 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
5122 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
5124 bctl->flags = bargs->flags;
5126 /* balance everything - no filters */
5127 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
5130 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
5137 * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to
5138 * btrfs_balance. bctl is freed in reset_balance_state, or, if
5139 * restriper was paused all the way until unmount, in free_fs_info.
5140 * The flag should be cleared after reset_balance_state.
5142 need_unlock = false;
5144 ret = btrfs_balance(fs_info, bctl, bargs);
5148 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5157 mutex_unlock(&fs_info->balance_mutex);
5159 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
5161 mnt_drop_write_file(file);
5165 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
5167 if (!capable(CAP_SYS_ADMIN))
5171 case BTRFS_BALANCE_CTL_PAUSE:
5172 return btrfs_pause_balance(fs_info);
5173 case BTRFS_BALANCE_CTL_CANCEL:
5174 return btrfs_cancel_balance(fs_info);
5180 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
5183 struct btrfs_ioctl_balance_args *bargs;
5186 if (!capable(CAP_SYS_ADMIN))
5189 mutex_lock(&fs_info->balance_mutex);
5190 if (!fs_info->balance_ctl) {
5195 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
5201 btrfs_update_ioctl_balance_args(fs_info, bargs);
5203 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5208 mutex_unlock(&fs_info->balance_mutex);
5212 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
5214 struct inode *inode = file_inode(file);
5215 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5216 struct btrfs_ioctl_quota_ctl_args *sa;
5219 if (!capable(CAP_SYS_ADMIN))
5222 ret = mnt_want_write_file(file);
5226 sa = memdup_user(arg, sizeof(*sa));
5232 down_write(&fs_info->subvol_sem);
5235 case BTRFS_QUOTA_CTL_ENABLE:
5236 ret = btrfs_quota_enable(fs_info);
5238 case BTRFS_QUOTA_CTL_DISABLE:
5239 ret = btrfs_quota_disable(fs_info);
5247 up_write(&fs_info->subvol_sem);
5249 mnt_drop_write_file(file);
5253 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
5255 struct inode *inode = file_inode(file);
5256 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5257 struct btrfs_root *root = BTRFS_I(inode)->root;
5258 struct btrfs_ioctl_qgroup_assign_args *sa;
5259 struct btrfs_trans_handle *trans;
5263 if (!capable(CAP_SYS_ADMIN))
5266 ret = mnt_want_write_file(file);
5270 sa = memdup_user(arg, sizeof(*sa));
5276 trans = btrfs_join_transaction(root);
5277 if (IS_ERR(trans)) {
5278 ret = PTR_ERR(trans);
5283 ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
5285 ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
5288 /* update qgroup status and info */
5289 err = btrfs_run_qgroups(trans);
5291 btrfs_handle_fs_error(fs_info, err,
5292 "failed to update qgroup status and info");
5293 err = btrfs_end_transaction(trans);
5300 mnt_drop_write_file(file);
5304 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
5306 struct inode *inode = file_inode(file);
5307 struct btrfs_root *root = BTRFS_I(inode)->root;
5308 struct btrfs_ioctl_qgroup_create_args *sa;
5309 struct btrfs_trans_handle *trans;
5313 if (!capable(CAP_SYS_ADMIN))
5316 ret = mnt_want_write_file(file);
5320 sa = memdup_user(arg, sizeof(*sa));
5326 if (!sa->qgroupid) {
5331 trans = btrfs_join_transaction(root);
5332 if (IS_ERR(trans)) {
5333 ret = PTR_ERR(trans);
5338 ret = btrfs_create_qgroup(trans, sa->qgroupid);
5340 ret = btrfs_remove_qgroup(trans, sa->qgroupid);
5343 err = btrfs_end_transaction(trans);
5350 mnt_drop_write_file(file);
5354 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
5356 struct inode *inode = file_inode(file);
5357 struct btrfs_root *root = BTRFS_I(inode)->root;
5358 struct btrfs_ioctl_qgroup_limit_args *sa;
5359 struct btrfs_trans_handle *trans;
5364 if (!capable(CAP_SYS_ADMIN))
5367 ret = mnt_want_write_file(file);
5371 sa = memdup_user(arg, sizeof(*sa));
5377 trans = btrfs_join_transaction(root);
5378 if (IS_ERR(trans)) {
5379 ret = PTR_ERR(trans);
5383 qgroupid = sa->qgroupid;
5385 /* take the current subvol as qgroup */
5386 qgroupid = root->root_key.objectid;
5389 ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
5391 err = btrfs_end_transaction(trans);
5398 mnt_drop_write_file(file);
5402 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5404 struct inode *inode = file_inode(file);
5405 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5406 struct btrfs_ioctl_quota_rescan_args *qsa;
5409 if (!capable(CAP_SYS_ADMIN))
5412 ret = mnt_want_write_file(file);
5416 qsa = memdup_user(arg, sizeof(*qsa));
5427 ret = btrfs_qgroup_rescan(fs_info);
5432 mnt_drop_write_file(file);
static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
	if (!qsa)
		return -ENOMEM;

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		qsa->flags = 1;
		qsa->progress = fs_info->qgroup_rescan_progress.objectid;
	}

	if (copy_to_user(arg, qsa, sizeof(*qsa)))
		ret = -EFAULT;

	kfree(qsa);
	return ret;
}

static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btrfs_qgroup_wait_for_completion(fs_info, true);
}

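/*
 * Note: the three rescan ioctls above form a single workflow.
 * BTRFS_IOC_QUOTA_RESCAN kicks off a background accounting rescan,
 * BTRFS_IOC_QUOTA_RESCAN_STATUS reports whether one is running and how
 * far it has progressed (by objectid), and BTRFS_IOC_QUOTA_RESCAN_WAIT
 * blocks until the current rescan completes.
 */
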
static long _btrfs_ioctl_set_received_subvol(struct file *file,
					    struct btrfs_ioctl_received_subvol_args *sa)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
	struct timespec64 ct = current_time(inode);
	int ret = 0;
	int received_uuid_changed;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

	down_write(&fs_info->subvol_sem);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     root->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out;
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	ret = btrfs_commit_transaction(trans);
out:
	up_write(&fs_info->subvol_sem);
	mnt_drop_write_file(file);
	return ret;
}

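/*
 * The "received subvol" ioctls are used by the receive side of btrfs
 * send/receive: after replaying a stream, userspace stamps the new
 * subvolume with the sender's uuid and generation (stransid), and the
 * helper above records the local receive transid (rtransid) and time in
 * the root item and the UUID tree, so later incremental sends can locate
 * this subvolume as a parent.
 */
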
#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, sa);
	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}

static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

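/*
 * Illustrative only: reading the label from userspace.  The handler above
 * copies at most the label's string length (and never a terminating NUL),
 * so a zero-initialized char[BTRFS_LABEL_SIZE] buffer stays NUL-terminated.
 * "/mnt" is a made-up mount point.
 *
 *	char label[BTRFS_LABEL_SIZE] = { 0 };
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
 *		printf("label: %s\n", label);
 */
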
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

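/*
 * The returned array has three entries, in order: every feature bit this
 * kernel knows about (SUPP), the bits that may be turned on while the
 * filesystem is mounted (SAFE_SET), and the bits that may be turned off
 * while mounted (SAFE_CLEAR).  Userspace typically diffs these against
 * BTRFS_IOC_GET_FEATURES before attempting BTRFS_IOC_SET_FEATURES.
 */
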
static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_names[set];
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}

#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

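/*
 * For example (expansion shown for clarity, not extra code),
 * check_feature(fs_info, change, flags, COMPAT_RO) expands to:
 *
 *	check_feature_bits(fs_info, FEAT_COMPAT_RO, change, flags,
 *			   BTRFS_FEATURE_COMPAT_RO_SUPP,
 *			   BTRFS_FEATURE_COMPAT_RO_SAFE_SET,
 *			   BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR);
 */
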
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}

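/*
 * Note on the argument layout used above: flags[0] is a mask selecting
 * which feature bits the caller wants to change and flags[1] holds their
 * new values, so a bit is turned on when present in both words and turned
 * off when present only in the mask.  This follows directly from the
 * newflags arithmetic in btrfs_ioctl_set_features().
 */
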
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}

	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}

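/*
 * The compat branch above exists because the 32-bit send args carry a
 * user pointer (clone_sources) whose width differs between a 32-bit
 * caller and a 64-bit kernel; compat_ptr() widens it so both paths feed
 * the same 64-bit struct into btrfs_ioctl_send().
 */
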
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case FS_IOC_FSGETXATTR:
		return btrfs_ioctl_fsgetxattr(file, argp);
	case FS_IOC_FSSETXATTR:
		return btrfs_ioctl_fssetxattr(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif