/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
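
/*
 * Illustrative sketch (not part of this file, hypothetical userspace
 * usage): a 32-bit program fills its own 4-byte-packed layout, so the
 * _IOWR number it generates differs from the 64-bit one; that is why
 * the *_32 ioctl number above must be registered separately:
 *
 *	struct btrfs_ioctl_received_subvol_args args = { 0 };
 *
 *	memcpy(args.uuid, uuid, BTRFS_UUID_SIZE);
 *	args.stransid = stransid;
 *	if (ioctl(subvol_fd, BTRFS_IOC_SET_RECEIVED_SUBVOL, &args) < 0)
 *		perror("BTRFS_IOC_SET_RECEIVED_SUBVOL");
 *
 * A 32-bit build compiles the same code, but the kernel then sees the
 * BTRFS_IOC_SET_RECEIVED_SUBVOL_32 number and converts the layout.
 */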
static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}
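
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * the iflags produced above are what lsattr(1) reads back via
 * FS_IOC_GETFLAGS:
 *
 *	unsigned int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
 *	    (flags & FS_NOCOW_FL))
 *		printf("file is NOCOW\n");
 */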
/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (ip->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_update_iflags(inode);
}
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
static int check_flags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;
	u64 ip_oldflags;
	unsigned int i_oldflags;
	umode_t mode;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	ret = check_flags(flags);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	ip_oldflags = ip->flags;
	i_oldflags = inode->i_flags;
	mode = inode->i_mode;

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;
	if (flags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				ip->flags |= BTRFS_INODE_NODATACOW
					   | BTRFS_INODE_NODATASUM;
		} else {
			ip->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under the same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				ip->flags &= ~(BTRFS_INODE_NODATACOW
					     | BTRFS_INODE_NODATASUM);
		} else {
			ip->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if the compression code won't make
	 * things smaller.
	 */
	if (flags & FS_NOCOMP_FL) {
		ip->flags &= ~BTRFS_INODE_COMPRESS;
		ip->flags |= BTRFS_INODE_NOCOMPRESS;

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
	} else if (flags & FS_COMPR_FL) {
		const char *comp;

		ip->flags |= BTRFS_INODE_COMPRESS;
		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;

		if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
			comp = "lzo";
		else
			comp = "zlib";
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;
	} else {
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}

	btrfs_update_iflags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans, root);
 out_drop:
	if (ret) {
		ip->flags = ip_oldflags;
		inode->i_flags = i_oldflags;
	}

 out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(file);
	return ret;
}
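
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * the equivalent of "chattr +C", which only turns checksums off on an
 * empty file because of the i_size check above:
 *
 *	unsigned int flags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOCOW_FL;
 *	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
 *		perror("FS_IOC_SETFLAGS");
 */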
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	unsigned int num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min((u64)q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info->tree_root, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
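
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * fstrim(8) boils down to this; len/minlen are clamped by the kernel
 * and the trimmed byte count is written back into the same struct:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 */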
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}
static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	u64 qgroup_reserved;
	uuid_le new_uuid;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	/*
	 * Don't create a subvolume whose level is not zero, or qgroup will be
	 * screwed up since it assumes the subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       8, &qgroup_reserved, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv,
						 qgroup_reserved);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);

	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	uuid_le_gen(&new_uuid);
	memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree without backreferences). Also no need to have
		 * the tree block locked since it is not in any tree at this
		 * point, so no other task can find it and use it.
		 */
		btrfs_free_tree_block(trans, root, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto fail;
	}

	free_extent_buffer(leaf);
	leaf = NULL;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(dir, &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, root,
				    name, namelen, dir, &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 btrfs_ino(dir), index, name, namelen);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
				  root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);

	if (async_transid) {
		*async_transid = trans->transid;
		err = btrfs_commit_transaction_async(trans, root, 1);
		if (err)
			err = btrfs_commit_transaction(trans, root);
	} else {
		err = btrfs_commit_transaction(trans, root);
	}
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	kfree(root_item);
	return ret;
}
static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
{
	s64 writers;
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(&root->subv_writers->wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&root->subv_writers->counter);
		if (writers)
			schedule();

		finish_wait(&root->subv_writers->wait, &wait);
	} while (writers);
}
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, char *name, int namelen,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot)
		return -ENOMEM;

	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_NOFS);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	atomic_inc(&root->will_be_snapshoted);
	smp_mb__after_atomic();
	btrfs_wait_for_no_snapshoting_writes(root);

	ret = btrfs_start_delalloc_inodes(root, 0);
	if (ret)
		goto dec_and_free;

	btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					&pending_snapshot->qgroup_reserved,
					false);
	if (ret)
		goto dec_and_free;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&root->fs_info->trans_lock);
	if (async_transid) {
		*async_transid = trans->transid;
		ret = btrfs_commit_transaction_async(trans,
				     root->fs_info->extent_root, 1);
		if (ret)
			ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_commit_transaction(trans,
					       root->fs_info->extent_root);
	}
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
fail:
	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
					 &pending_snapshot->block_rsv,
					 pending_snapshot->qgroup_reserved);
dec_and_free:
	if (atomic_dec_and_test(&root->will_be_snapshoted))
		wake_up_atomic_t(&root->will_be_snapshoted);
free_pending:
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}
/* copy of may_delete in fs/namei.c
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(struct path *parent,
				   char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct dentry *dentry;
	int error;

	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry, name, namelen,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(dir);
	return error;
}
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}
/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 len = PAGE_SIZE;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
		lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
		ret = false;

	free_extent_map(next);
	return ret;
}
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
{
	struct extent_map *em;
	int ret = 1;
	bool next_mergeable = true;
	bool prev_mergeable = true;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		ret = 0;
		goto out;
	}

	if (!*defrag_end)
		prev_mergeable = false;

	next_mergeable = defrag_check_next_extent(inode, em);
	/*
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
	 */
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
		ret = 0;
out:
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}
/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
				    unsigned long num_pages)
{
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
	u64 page_cnt;
	int ret;
	int i;
	int i_done;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *tree;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	file_end = (isize - 1) >> PAGE_SHIFT;
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

	ret = btrfs_delalloc_reserve_space(inode,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
	if (ret)
		return ret;
	i_done = 0;
	tree = &BTRFS_I(inode)->io_tree;

	/* step one, lock all the pages */
	for (i = 0; i < page_cnt; i++) {
		struct page *page;
again:
		page = find_or_create_page(inode->i_mapping,
					   start_index + i, mask);
		if (!page)
			break;

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state, GFP_NOFS);
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
			/*
			 * we unlocked the page above, so we need check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & MS_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
			 page_start, page_end - 1, &cached_state);
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
			  &cached_state, GFP_NOFS);

	if (i_done != page_cnt) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->lock);
		btrfs_delalloc_release_space(inode,
				start_index << PAGE_SHIFT,
				(page_cnt - i_done) << PAGE_SHIFT);
	}

	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state,
			     GFP_NOFS);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_space(inode,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
	return ret;
}
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
	unsigned long last_index;
	u64 isize = i_size_read(inode);
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
	unsigned long i;
	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
	unsigned long cluster = max_cluster;
	u64 new_align = ~((u64)SZ_128K - 1);
	struct page **pages = NULL;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	/*
	 * if we were not given a file, allocate a readahead
	 * context
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_NOFS);
		if (!ra)
			return -ENOMEM;
		file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *),
			GFP_NOFS);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_SHIFT;
	} else {
		last_index = (isize - 1) >> PAGE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, SZ_64K);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_SHIFT;
	}
	if (!max_to_defrag)
		max_to_defrag = last_index - i + 1;

	/*
	 * make writeback start from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

	while (i <= last_index && defrag_count < max_to_defrag &&
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & MS_ACTIVE))
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			btrfs_debug(root->fs_info, "defrag_file cancelled");
			ret = -EAGAIN;
			break;
		}

		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
					 extent_thresh, &last_len, &skip,
					 &defrag_end, range->flags &
					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to
			 * skip; bump our counter by the suggested amount
			 */
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
			i = max(i + 1, next);
			continue;
		}

		if (!newer_than) {
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
				       cluster);
			ra_index += cluster;
		}

		inode_lock(inode);
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = compress_type;
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		if (ret < 0) {
			inode_unlock(inode);
			goto out_ra;
		}

		defrag_count += ret;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		inode_unlock(inode);

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

			if (ret > 0)
				i += ret;

			newer_off = max(newer_off + 1,
					(u64)i << PAGE_SHIFT);

			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_SHIFT;
			} else {
				break;
			}
		} else {
			if (ret > 0) {
				i += ret;
				last_len += ret << PAGE_SHIFT;
			} else {
				i++;
				last_len = 0;
			}
		}
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
		atomic_inc(&root->fs_info->async_submit_draining);
		while (atomic_read(&root->fs_info->nr_async_submits) ||
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			   atomic_read(&root->fs_info->async_delalloc_pages) == 0));
		}
		atomic_dec(&root->fs_info->async_submit_draining);
	}

	if (range->compress_type == BTRFS_COMPRESS_LZO) {
		btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
	}

	ret = defrag_count;

out_ra:
	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		inode_lock(inode);
		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
		inode_unlock(inode);
	}
	if (!file)
		kfree(ra);
	kfree(pages);
	return ret;
}
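
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * a "btrfs filesystem defragment" style invocation of the range ioctl
 * handled above; compression forces START_IO as noted in the ioctl
 * handler below:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *
 *	range.len = (__u64)-1;
 *	range.extent_thresh = 256 * 1024;
 *	range.flags = BTRFS_DEFRAG_RANGE_START_IO;
 *	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
 *		perror("BTRFS_IOC_DEFRAG_RANGE");
 */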
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		mnt_drop_write_file(file);
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	mutex_lock(&root->fs_info->volume_mutex);
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
		btrfs_info(root->fs_info, "resizing devid %llu", devid);
	}

	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (!device) {
		btrfs_info(root->fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_free;
	}

	if (!device->writeable) {
		btrfs_info(root->fs_info,
			   "resizer unable to apply on readonly device %llu",
			   devid);
		ret = -EPERM;
		goto out_free;
	}

	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_free;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = -EPERM;
		goto out_free;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_free;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_free;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_free;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_free;
	}

	new_size = div_u64(new_size, root->sectorsize);
	new_size *= root->sectorsize;

	btrfs_info_in_rcu(root->fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_free;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans, root);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing to do */

out_free:
	kfree(vol_args);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
	mnt_drop_write_file(file);
	return ret;
}
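
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * the name buffer carries an optional "devid:" prefix plus a size
 * string parsed above ("max", an absolute size, or a +/- delta):
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *
 *	strcpy(args.name, "1:+10G");	// grow devid 1 by 10GiB
 *	if (ioctl(mnt_fd, BTRFS_IOC_RESIZE, &args) < 0)
 *		perror("BTRFS_IOC_RESIZE");
 */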
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
				char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
				     NULL, transid, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;
		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to the user's own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		if (vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&root->fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&root->fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&root->fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(root->fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&root->root_key, &root->root_item);

	btrfs_commit_transaction(trans, root);
out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&root->fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
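
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * toggling a subvolume read-only, the equivalent of
 * "btrfs property set <subvol> ro true":
 *
 *	__u64 flags;
 *
 *	ioctl(subvol_fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags);
 *	flags |= BTRFS_SUBVOL_RDONLY;
 *	if (ioctl(subvol_fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags) < 0)
 *		perror("BTRFS_IOC_SUBVOL_SETFLAGS");
 */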
/*
 * helper to check if the subvolume references other subvolumes
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(root->fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */
			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/*
			 * Copy the item, same behavior as above, but reset the
			 * *sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stop the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = fault_in_pages_writeable(ubuf + sk_offset,
					       *buf_size - sk_offset);
		if (ret)
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;
	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	if (buf_size < sizeof(struct btrfs_ioctl_search_header))
		return -EOVERFLOW;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
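
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * a v2 search for all ROOT_ITEMs in the root tree; on -EOVERFLOW the
 * kernel writes the required buf_size back, as implemented above:
 *
 *	struct btrfs_ioctl_search_args_v2 *args;
 *
 *	args = calloc(1, sizeof(*args) + 65536);
 *	args->buf_size = 65536;
 *	args->key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args->key.min_type = args->key.max_type = BTRFS_ROOT_ITEM_KEY;
 *	args->key.max_objectid = (__u64)-1;
 *	args->key.max_offset = (__u64)-1;
 *	args->key.max_transid = (__u64)-1;
 *	args->key.nr_items = (__u32)-1;
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
 */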
/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree, and set the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		btrfs_err(info, "could not find root %llu", tree_id);
		ret = -ENOENT;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
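
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * with treeid == 0 this resolves the containing subvolume id of fd,
 * the one unprivileged use permitted above:
 *
 *	struct btrfs_ioctl_ino_lookup_args args = {
 *		.treeid = 0,
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
 *		printf("subvolume id %llu\n",
 *		       (unsigned long long)args.treeid);
 */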
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg)
{
	struct dentry *parent = file->f_path.dentry;
	struct dentry *dentry;
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	u64 qgroup_reserved;
	int namelen;
	int ret;
	int err = 0;

	if (!S_ISDIR(dir->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/') ||
	    strncmp(vol_args->name, "..", namelen) == 0) {
		err = -EINVAL;
		goto out;
	}

	err = mnt_want_write_file(file);
	if (err)
		goto out;

	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
		goto out_drop_write;
	dentry = lookup_one_len(vol_args->name, parent, namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

	if (d_really_is_negative(dentry)) {
		err = -ENOENT;
		goto out_dput;
	}

	inode = d_inode(dentry);
	dest = BTRFS_I(inode)->root;
	if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this is _not_ a check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
		if (!btrfs_test_opt(root->fs_info, USER_SUBVOL_RM_ALLOWED))
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * inside it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
	}

	/* check if subvolume may be deleted by a user */
	err = btrfs_may_delete(dir, dentry, 1);
	if (err)
		goto out_dput;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
	}

	inode_lock(inode);

	/*
	 * Don't allow deleting a subvolume with send in progress. This is
	 * inside the i_mutex so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	root_flags = btrfs_root_flags(&dest->root_item);
	if (dest->send_in_progress == 0) {
		btrfs_set_root_flags(&dest->root_item,
				root_flags | BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(root->fs_info,
			   "Attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		err = -EPERM;
		goto out_unlock_inode;
	}

	down_write(&root->fs_info->subvol_sem);

	err = may_destroy_subvol(dest);
	if (err)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode, two for dir entries, two for root
	 * ref/backref.
	 */
	err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       5, &qgroup_reserved, true);
	if (err)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, root, dir,
				  dest->root_key.objectid,
				  dentry->d_name.name,
				  dentry->d_name.len);
	if (ret) {
		err = ret;
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	btrfs_record_root_in_trans(trans, dest);

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	dest->root_item.drop_level = 0;
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					root->fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
				  dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		}
	}

out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans, root);
	if (ret && !err)
		err = ret;
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
out_up_write:
	up_write(&root->fs_info->subvol_sem);
	if (err) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_unlock_inode:
	inode_unlock(inode);
	if (!err) {
		d_invalidate(dentry);
		btrfs_invalidate_inodes(dest);
		d_delete(dentry);
		ASSERT(dest->send_in_progress == 0);

		/* the last ref */
		if (dest->ino_cache_inode) {
			iput(dest->ino_cache_inode);
			dest->ino_cache_inode = NULL;
		}
	}
out_dput:
	dput(dentry);
out_unlock_dir:
	inode_unlock(dir);
out_drop_write:
	mnt_drop_write_file(file);
out:
	kfree(vol_args);
	return err;
}
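
/*
 * Example (hypothetical userspace counterpart, not part of this file):
 * "btrfs subvolume delete" on a child of the directory open at dirfd;
 * only the last path component is passed, as enforced above:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *
 *	strcpy(args.name, "snap-2016-01-01");
 *	if (ioctl(dirfd, BTRFS_IOC_SNAP_DESTROY, &args) < 0)
 *		perror("BTRFS_IOC_SNAP_DESTROY");
 */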
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_defrag_range_args *range;
	int ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root);
		if (ret)
			goto out;
		ret = btrfs_defrag_root(root->fs_info->extent_root);
		break;
	case S_IFREG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EINVAL;
			goto out;
		}

		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(range, argp,
					   sizeof(*range))) {
				ret = -EFAULT;
				kfree(range);
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range->extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all set to zero by kzalloc */
			range->len = (u64)-1;
		}
		ret = btrfs_defrag_file(file_inode(file), file,
					range, 0, 0);
		if (ret > 0)
			ret = 0;
		kfree(range);
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write_file(file);
	return ret;
}
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1))
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;

	mutex_lock(&root->fs_info->volume_mutex);
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(root, vol_args->name);

	if (!ret)
		btrfs_info(root->fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
	return ret;
}
2736 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
2738 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
2739 struct btrfs_ioctl_vol_args_v2 *vol_args;
2742 if (!capable(CAP_SYS_ADMIN))
2745 ret = mnt_want_write_file(file);
2749 vol_args = memdup_user(arg, sizeof(*vol_args));
2750 if (IS_ERR(vol_args)) {
2751 ret = PTR_ERR(vol_args);
2755 /* Check for compatibility: reject unknown flags */
2756 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
2761 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2763 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2767 mutex_lock(&root->fs_info->volume_mutex);
2768 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
2769 ret = btrfs_rm_device(root, NULL, vol_args->devid);
2771 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2772 ret = btrfs_rm_device(root, vol_args->name, 0);
2774 mutex_unlock(&root->fs_info->volume_mutex);
2775 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
2778 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
2779 btrfs_info(root->fs_info, "device deleted: id %llu",
2782 btrfs_info(root->fs_info, "device deleted: %s",
2788 mnt_drop_write_file(file);
2792 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
2794 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
2795 struct btrfs_ioctl_vol_args *vol_args;
2798 if (!capable(CAP_SYS_ADMIN))
2801 ret = mnt_want_write_file(file);
2805 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2807 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2808 goto out_drop_write;
2811 vol_args = memdup_user(arg, sizeof(*vol_args));
2812 if (IS_ERR(vol_args)) {
2813 ret = PTR_ERR(vol_args);
2817 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2818 mutex_lock(&root->fs_info->volume_mutex);
2819 ret = btrfs_rm_device(root, vol_args->name, 0);
2820 mutex_unlock(&root->fs_info->volume_mutex);
2823 btrfs_info(root->fs_info, "disk deleted %s", vol_args->name);
2826 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
2828 mnt_drop_write_file(file);
2833 static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2835 struct btrfs_ioctl_fs_info_args *fi_args;
2836 struct btrfs_device *device;
2837 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2840 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2844 mutex_lock(&fs_devices->device_list_mutex);
2845 fi_args->num_devices = fs_devices->num_devices;
2846 memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
2848 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2849 if (device->devid > fi_args->max_id)
2850 fi_args->max_id = device->devid;
2852 mutex_unlock(&fs_devices->device_list_mutex);
2854 fi_args->nodesize = root->fs_info->super_copy->nodesize;
2855 fi_args->sectorsize = root->fs_info->super_copy->sectorsize;
2856 fi_args->clone_alignment = root->fs_info->super_copy->sectorsize;
2858 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2865 static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2867 struct btrfs_ioctl_dev_info_args *di_args;
2868 struct btrfs_device *dev;
2869 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2871 char *s_uuid = NULL;
2873 di_args = memdup_user(arg, sizeof(*di_args));
2874 if (IS_ERR(di_args))
2875 return PTR_ERR(di_args);
2877 if (!btrfs_is_empty_uuid(di_args->uuid))
2878 s_uuid = di_args->uuid;
2880 mutex_lock(&fs_devices->device_list_mutex);
2881 dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
2888 di_args->devid = dev->devid;
2889 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
2890 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
2891 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2893 struct rcu_string *name;
2896 name = rcu_dereference(dev->name);
2897 strncpy(di_args->path, name->str, sizeof(di_args->path));
2899 di_args->path[sizeof(di_args->path) - 1] = 0;
2901 di_args->path[0] = '\0';
2905 mutex_unlock(&fs_devices->device_list_mutex);
2906 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
2913 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2917 page = grab_cache_page(inode->i_mapping, index);
2919 return ERR_PTR(-ENOMEM);
2921 if (!PageUptodate(page)) {
2924 ret = btrfs_readpage(NULL, page);
2926 return ERR_PTR(ret);
2928 if (!PageUptodate(page)) {
2931 return ERR_PTR(-EIO);
2933 if (page->mapping != inode->i_mapping) {
2936 return ERR_PTR(-EAGAIN);
2943 static int gather_extent_pages(struct inode *inode, struct page **pages,
2944 int num_pages, u64 off)
2947 pgoff_t index = off >> PAGE_SHIFT;
2949 for (i = 0; i < num_pages; i++) {
2951 pages[i] = extent_same_get_page(inode, index + i);
2952 if (IS_ERR(pages[i])) {
2953 int err = PTR_ERR(pages[i]);
2964 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2965 bool retry_range_locking)
2968 * Do any pending delalloc/csum calculations on inode, one way or
2969 * another, and lock file content.
2970 * The locking order is:
2971 *
2972 *   1) pages
2973 *   2) range in the inode's io tree
2976 struct btrfs_ordered_extent *ordered;
2977 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2978 ordered = btrfs_lookup_first_ordered_extent(inode,
2981 ordered->file_offset + ordered->len <= off ||
2982 ordered->file_offset >= off + len) &&
2983 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
2984 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
2986 btrfs_put_ordered_extent(ordered);
2989 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2991 btrfs_put_ordered_extent(ordered);
2992 if (!retry_range_locking)
2994 btrfs_wait_ordered_range(inode, off, len);
2999 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
3001 inode_unlock(inode1);
3002 inode_unlock(inode2);
3005 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3007 if (inode1 < inode2)
3008 swap(inode1, inode2);
3010 inode_lock_nested(inode1, I_MUTEX_PARENT);
3011 inode_lock_nested(inode2, I_MUTEX_CHILD);
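/*
 * Ordering note (illustrative, not from the original source): because the
 * higher-addressed inode is always locked first, two tasks cloning in
 * opposite directions between the same pair of inodes acquire the locks
 * in the same order, which prevents an ABBA deadlock.
 */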
3014 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3015 struct inode *inode2, u64 loff2, u64 len)
3017 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3018 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3021 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3022 struct inode *inode2, u64 loff2, u64 len,
3023 bool retry_range_locking)
3027 if (inode1 < inode2) {
3028 swap(inode1, inode2);
3031 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
3034 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
3036 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
3043 struct page **src_pages;
3044 struct page **dst_pages;
3047 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
3052 for (i = 0; i < cmp->num_pages; i++) {
3053 pg = cmp->src_pages[i];
3058 pg = cmp->dst_pages[i];
3064 kfree(cmp->src_pages);
3065 kfree(cmp->dst_pages);
3068 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3069 struct inode *dst, u64 dst_loff,
3070 u64 len, struct cmp_pages *cmp)
3073 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3074 struct page **src_pgarr, **dst_pgarr;
3077 * We must gather up all the pages before we initiate our
3078 * extent locking. We use an array for the page pointers. Size
3079 * of the array is bounded by len, which is in turn bounded by
3080 * BTRFS_MAX_DEDUPE_LEN.
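/*
 * Size sketch (illustrative, assuming 4 KiB pages and 64-bit pointers):
 * BTRFS_MAX_DEDUPE_LEN (16 MiB) caps num_pages at 16 MiB / 4 KiB = 4096
 * entries, i.e. at most 32 KiB per pointer array below.
 */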
3082 src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3083 dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3084 if (!src_pgarr || !dst_pgarr) {
3089 cmp->num_pages = num_pages;
3090 cmp->src_pages = src_pgarr;
3091 cmp->dst_pages = dst_pgarr;
3093 ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
3097 ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
3101 btrfs_cmp_data_free(cmp);
3105 static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
3106 u64 dst_loff, u64 len, struct cmp_pages *cmp)
3110 struct page *src_page, *dst_page;
3111 unsigned int cmp_len = PAGE_SIZE;
3112 void *addr, *dst_addr;
3116 if (len < PAGE_SIZE)
3119 BUG_ON(i >= cmp->num_pages);
3121 src_page = cmp->src_pages[i];
3122 dst_page = cmp->dst_pages[i];
3123 ASSERT(PageLocked(src_page));
3124 ASSERT(PageLocked(dst_page));
3126 addr = kmap_atomic(src_page);
3127 dst_addr = kmap_atomic(dst_page);
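/*
 * Flush the D-cache for both pages so the memcmp below sees data written
 * through other mappings (this only matters on architectures with
 * aliasing, virtually indexed caches).
 */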
3129 flush_dcache_page(src_page);
3130 flush_dcache_page(dst_page);
3132 if (memcmp(addr, dst_addr, cmp_len))
3135 kunmap_atomic(addr);
3136 kunmap_atomic(dst_addr);
3148 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3152 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3154 if (off + olen > inode->i_size || off + olen < off)
3157 /* if we extend to eof, continue to block boundary */
3158 if (off + len == inode->i_size)
3159 *plen = len = ALIGN(inode->i_size, bs) - off;
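/*
 * Worked example (illustrative): with i_size = 10000, bs = 4096 and
 * off = 8192, a request ending at EOF has its length extended to
 * ALIGN(10000, 4096) - 8192 = 12288 - 8192 = 4096 bytes.
 */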
3161 /* Check that we are block aligned - btrfs_clone() requires this */
3162 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3168 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3169 struct inode *dst, u64 dst_loff)
3173 struct cmp_pages cmp;
3175 u64 same_lock_start = 0;
3176 u64 same_lock_len = 0;
3187 ret = extent_same_check_offsets(src, loff, &len, olen);
3190 ret = extent_same_check_offsets(src, dst_loff, &len, olen);
3195 * Single inode case wants the same checks, except we
3196 * don't want our length pushed out past i_size as
3197 * comparing that data range makes no sense.
3199 * extent_same_check_offsets() will do this for an
3200 * unaligned length at i_size, so catch it here and
3201 * reject the request.
3203 * This effectively means we require aligned extents
3204 * for the single-inode case, whereas the other cases
3205 * allow an unaligned length so long as it ends at i_size.
3213 /* Check for overlapping ranges */
3214 if (dst_loff + len > loff && dst_loff < loff + len) {
3219 same_lock_start = min_t(u64, loff, dst_loff);
3220 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
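/*
 * Illustrative numbers: loff = 0, dst_loff = 8192, len = 4096 yield
 * same_lock_start = 0 and same_lock_len = 8192 + 4096 - 0 = 12288, a
 * single lock covering both ranges of the one inode.
 */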
3222 btrfs_double_inode_lock(src, dst);
3224 ret = extent_same_check_offsets(src, loff, &len, olen);
3228 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3233 /* don't make the dst file partly checksummed */
3234 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3235 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3241 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3246 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3249 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3252 * If one of the inodes has dirty pages in the respective range or
3253 * ordered extents, we need to flush delalloc and wait for all ordered
3254 * extents in the range. We must unlock the pages and the ranges in the
3255 * io trees to avoid deadlocks when flushing delalloc (requires locking
3256 * pages) and when waiting for ordered extents to complete (they require
3257 * locking the pages too).
3259 if (ret == -EAGAIN) {
3261 * Ranges in the io trees already unlocked. Now unlock all
3262 * pages before waiting for all IO to complete.
3264 btrfs_cmp_data_free(&cmp);
3266 btrfs_wait_ordered_range(src, same_lock_start,
3269 btrfs_wait_ordered_range(src, loff, len);
3270 btrfs_wait_ordered_range(dst, dst_loff, len);
3276 /* ranges in the io trees already unlocked */
3277 btrfs_cmp_data_free(&cmp);
3281 /* pass original length for comparison so we stay within i_size */
3282 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
3284 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3287 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3288 same_lock_start + same_lock_len - 1);
3290 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3292 btrfs_cmp_data_free(&cmp);
3297 btrfs_double_inode_unlock(src, dst);
3302 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3304 ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3305 struct file *dst_file, u64 dst_loff)
3307 struct inode *src = file_inode(src_file);
3308 struct inode *dst = file_inode(dst_file);
3309 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3312 if (olen > BTRFS_MAX_DEDUPE_LEN)
3313 olen = BTRFS_MAX_DEDUPE_LEN;
3315 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3317 * Btrfs does not support blocksize < page_size. As a
3318 * result, btrfs_cmp_data() won't correctly handle
3319 * this situation without an update.
3324 res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
3330 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3331 struct inode *inode,
3337 struct btrfs_root *root = BTRFS_I(inode)->root;
3340 inode_inc_iversion(inode);
3341 if (!no_time_update)
3342 inode->i_mtime = inode->i_ctime = current_time(inode);
3344 * We round up to the block size at eof when determining which
3345 * extents to clone above, but shouldn't round up the file size.
3347 if (endoff > destoff + olen)
3348 endoff = destoff + olen;
3349 if (endoff > inode->i_size)
3350 btrfs_i_size_write(inode, endoff);
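/*
 * Illustrative numbers: cloning olen = 5000 bytes to destoff = 0 with a
 * 4096-byte block size copies extents up to byte 8192, but endoff is
 * clamped to 5000 above so i_size never includes the alignment padding.
 */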
3352 ret = btrfs_update_inode(trans, root, inode);
3354 btrfs_abort_transaction(trans, ret);
3355 btrfs_end_transaction(trans, root);
3358 ret = btrfs_end_transaction(trans, root);
3363 static void clone_update_extent_map(struct inode *inode,
3364 const struct btrfs_trans_handle *trans,
3365 const struct btrfs_path *path,
3366 const u64 hole_offset,
3369 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3370 struct extent_map *em;
3373 em = alloc_extent_map();
3375 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3376 &BTRFS_I(inode)->runtime_flags);
3381 struct btrfs_file_extent_item *fi;
3383 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3384 struct btrfs_file_extent_item);
3385 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3386 em->generation = -1;
3387 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3388 BTRFS_FILE_EXTENT_INLINE)
3389 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3390 &BTRFS_I(inode)->runtime_flags);
3392 em->start = hole_offset;
3394 em->ram_bytes = em->len;
3395 em->orig_start = hole_offset;
3396 em->block_start = EXTENT_MAP_HOLE;
3398 em->orig_block_len = 0;
3399 em->compress_type = BTRFS_COMPRESS_NONE;
3400 em->generation = trans->transid;
3404 write_lock(&em_tree->lock);
3405 ret = add_extent_mapping(em_tree, em, 1);
3406 write_unlock(&em_tree->lock);
3407 if (ret != -EEXIST) {
3408 free_extent_map(em);
3411 btrfs_drop_extent_cache(inode, em->start,
3412 em->start + em->len - 1, 0);
3416 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3417 &BTRFS_I(inode)->runtime_flags);
3421 * Make sure we do not end up inserting an inline extent into a file that
3422 * already has other (non-inline) extents. If a file has an inline extent it
3423 * cannot have any other extents and the (single) inline extent must start at
3424 * file offset 0. Failing to respect these rules will lead to file corruption,
3425 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc.
3427 * We can have extents that have already been written to disk or we can have
3428 * dirty ranges still in delalloc, in which case the extent maps and items are
3429 * created only when we run delalloc, and the delalloc ranges might fall outside
3430 * the range we are currently locking in the inode's io tree. So we check the
3431 * inode's i_size because of that (i_size updates are done while holding the
3432 * i_mutex, which we are holding here).
3433 * We also check to see if the inode has a size not greater than "datal" but has
3434 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3435 * protected against such concurrent fallocate calls by the i_mutex).
3437 * If the file has no extents but a size greater than datal, do not allow the
3438 * copy because we would need to turn the inline extent into a non-inline one
3439 * (even with NO_HOLES enabled). If we find our destination inode only has one
3440 * inline extent, just overwrite it with the source inline extent if its size
3441 * is less than the source extent's size, or we could copy the source inline
3442 * extent's data into the destination inode's inline extent if the latter is
3443 * greater than the former.
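/*
 * Concrete case of the rule above (illustrative): cloning a 500-byte
 * inline extent at offset 0 into a destination whose i_size is 8192 is
 * rejected with -EOPNOTSUPP, since honoring it would require turning the
 * inline extent into a regular (non-inline) one.
 */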
3445 static int clone_copy_inline_extent(struct inode *src,
3447 struct btrfs_trans_handle *trans,
3448 struct btrfs_path *path,
3449 struct btrfs_key *new_key,
3450 const u64 drop_start,
3456 struct btrfs_root *root = BTRFS_I(dst)->root;
3457 const u64 aligned_end = ALIGN(new_key->offset + datal,
3460 struct btrfs_key key;
3462 if (new_key->offset > 0)
3465 key.objectid = btrfs_ino(dst);
3466 key.type = BTRFS_EXTENT_DATA_KEY;
3468 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3471 } else if (ret > 0) {
3472 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3473 ret = btrfs_next_leaf(root, path);
3477 goto copy_inline_extent;
3479 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3480 if (key.objectid == btrfs_ino(dst) &&
3481 key.type == BTRFS_EXTENT_DATA_KEY) {
3482 ASSERT(key.offset > 0);
3485 } else if (i_size_read(dst) <= datal) {
3486 struct btrfs_file_extent_item *ei;
3490 * If the file size is <= datal, make sure there are no other
3491 * extents following (this can happen due to a fallocate call with
3492 * the flag FALLOC_FL_KEEP_SIZE).
3494 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3495 struct btrfs_file_extent_item);
3497 * If it's an inline extent, it cannot have other extents following it.
3500 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3501 BTRFS_FILE_EXTENT_INLINE)
3502 goto copy_inline_extent;
3504 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3505 if (ext_len > aligned_end)
3508 ret = btrfs_next_item(root, path);
3511 } else if (ret == 0) {
3512 btrfs_item_key_to_cpu(path->nodes[0], &key,
3514 if (key.objectid == btrfs_ino(dst) &&
3515 key.type == BTRFS_EXTENT_DATA_KEY)
3522 * We have no extent items, or we have an extent at offset 0 which may
3523 * or may not be inlined. All these cases are dealt with in the same way.
3525 if (i_size_read(dst) > datal) {
3527 * If the destination inode has an inline extent...
3528 * This would require copying the data from the source inline
3529 * extent into the beginning of the destination's inline extent.
3530 * But this is really complex, both extents can be compressed
3531 * or just one of them, which would require decompressing and
3532 * re-compressing data (which could increase the new compressed
3533 * size, not allowing the compressed data to fit anymore in an inline extent).
3535 * So just don't support this case for now (it should be rare,
3536 * we are not really saving space when cloning inline extents).
3541 btrfs_release_path(path);
3542 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3545 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3550 const u32 start = btrfs_file_extent_calc_inline_size(0);
3552 memmove(inline_data + start, inline_data + start + skip, datal);
3555 write_extent_buffer(path->nodes[0], inline_data,
3556 btrfs_item_ptr_offset(path->nodes[0],
3559 inode_add_bytes(dst, datal);
3565 * btrfs_clone() - clone a range from inode file to another
3567 * @src: Inode to clone from
3568 * @inode: Inode to clone to
3569 * @off: Offset within source to start clone from
3570 * @olen: Original length, passed by user, of range to clone
3571 * @olen_aligned: Block-aligned value of olen
3572 * @destoff: Offset within @inode to start clone
3573 * @no_time_update: Whether to update mtime/ctime on the target inode
3575 static int btrfs_clone(struct inode *src, struct inode *inode,
3576 const u64 off, const u64 olen, const u64 olen_aligned,
3577 const u64 destoff, int no_time_update)
3579 struct btrfs_root *root = BTRFS_I(inode)->root;
3580 struct btrfs_path *path = NULL;
3581 struct extent_buffer *leaf;
3582 struct btrfs_trans_handle *trans;
3584 struct btrfs_key key;
3588 const u64 len = olen_aligned;
3589 u64 last_dest_end = destoff;
3592 buf = kmalloc(root->nodesize, GFP_KERNEL | __GFP_NOWARN);
3594 buf = vmalloc(root->nodesize);
3599 path = btrfs_alloc_path();
3605 path->reada = READA_FORWARD;
3607 key.objectid = btrfs_ino(src);
3608 key.type = BTRFS_EXTENT_DATA_KEY;
3612 u64 next_key_min_offset = key.offset + 1;
3615 * note the key will change type as we walk through the tree.
3618 path->leave_spinning = 1;
3619 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3624 * On the first search, if no extent item starting at offset off was
3625 * found but the previous item is an extent item, it might
3626 * overlap our target range, therefore process it.
3628 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3629 btrfs_item_key_to_cpu(path->nodes[0], &key,
3630 path->slots[0] - 1);
3631 if (key.type == BTRFS_EXTENT_DATA_KEY)
3635 nritems = btrfs_header_nritems(path->nodes[0]);
3637 if (path->slots[0] >= nritems) {
3638 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3643 nritems = btrfs_header_nritems(path->nodes[0]);
3645 leaf = path->nodes[0];
3646 slot = path->slots[0];
3648 btrfs_item_key_to_cpu(leaf, &key, slot);
3649 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3650 key.objectid != btrfs_ino(src))
3653 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3654 struct btrfs_file_extent_item *extent;
3657 struct btrfs_key new_key;
3658 u64 disko = 0, diskl = 0;
3659 u64 datao = 0, datal = 0;
3663 extent = btrfs_item_ptr(leaf, slot,
3664 struct btrfs_file_extent_item);
3665 comp = btrfs_file_extent_compression(leaf, extent);
3666 type = btrfs_file_extent_type(leaf, extent);
3667 if (type == BTRFS_FILE_EXTENT_REG ||
3668 type == BTRFS_FILE_EXTENT_PREALLOC) {
3669 disko = btrfs_file_extent_disk_bytenr(leaf,
3671 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3673 datao = btrfs_file_extent_offset(leaf, extent);
3674 datal = btrfs_file_extent_num_bytes(leaf,
3676 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3677 /* take upper bound, may be compressed */
3678 datal = btrfs_file_extent_ram_bytes(leaf,
3683 * The first search might have left us at an extent
3684 * item that ends before our target range's start, which can
3685 * happen if we have holes and the NO_HOLES feature enabled.
3687 if (key.offset + datal <= off) {
3690 } else if (key.offset >= off + len) {
3693 next_key_min_offset = key.offset + datal;
3694 size = btrfs_item_size_nr(leaf, slot);
3695 read_extent_buffer(leaf, buf,
3696 btrfs_item_ptr_offset(leaf, slot),
3699 btrfs_release_path(path);
3700 path->leave_spinning = 0;
3702 memcpy(&new_key, &key, sizeof(new_key));
3703 new_key.objectid = btrfs_ino(inode);
3704 if (off <= key.offset)
3705 new_key.offset = key.offset + destoff - off;
3707 new_key.offset = destoff;
3710 * Deal with a hole that doesn't have an extent item
3711 * that represents it (NO_HOLES feature enabled).
3712 * This hole is either in the middle of the cloning
3713 * range or at the beginning (fully overlaps it or
3714 * partially overlaps it).
3716 if (new_key.offset != last_dest_end)
3717 drop_start = last_dest_end;
3719 drop_start = new_key.offset;
3722 * 1 - adjusting old extent (we may have to split it)
3723 * 1 - add new extent
3726 trans = btrfs_start_transaction(root, 3);
3727 if (IS_ERR(trans)) {
3728 ret = PTR_ERR(trans);
3732 if (type == BTRFS_FILE_EXTENT_REG ||
3733 type == BTRFS_FILE_EXTENT_PREALLOC) {
3735 * a | --- range to clone ---| b
3736 * | ------------- extent ------------- |
3739 /* subtract range b */
3740 if (key.offset + datal > off + len)
3741 datal = off + len - key.offset;
3743 /* subtract range a */
3744 if (off > key.offset) {
3745 datao += off - key.offset;
3746 datal -= off - key.offset;
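/*
 * Worked example (illustrative): for an extent at key.offset = 0 with
 * datal = 16384, cloned with off = 4096 and len = 8192, range b trims
 * datal to 12288, and range a then adds 4096 to datao and leaves
 * datal = 8192 -- exactly the bytes inside [off, off + len).
 */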
3749 ret = btrfs_drop_extents(trans, root, inode,
3751 new_key.offset + datal,
3754 if (ret != -EOPNOTSUPP)
3755 btrfs_abort_transaction(trans,
3757 btrfs_end_transaction(trans, root);
3761 ret = btrfs_insert_empty_item(trans, root, path,
3764 btrfs_abort_transaction(trans, ret);
3765 btrfs_end_transaction(trans, root);
3769 leaf = path->nodes[0];
3770 slot = path->slots[0];
3771 write_extent_buffer(leaf, buf,
3772 btrfs_item_ptr_offset(leaf, slot),
3775 extent = btrfs_item_ptr(leaf, slot,
3776 struct btrfs_file_extent_item);
3778 /* disko == 0 means it's a hole */
3782 btrfs_set_file_extent_offset(leaf, extent,
3784 btrfs_set_file_extent_num_bytes(leaf, extent,
3788 inode_add_bytes(inode, datal);
3789 ret = btrfs_inc_extent_ref(trans, root,
3791 root->root_key.objectid,
3793 new_key.offset - datao);
3795 btrfs_abort_transaction(trans,
3797 btrfs_end_transaction(trans,
3803 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3807 if (off > key.offset) {
3808 skip = off - key.offset;
3809 new_key.offset += skip;
3812 if (key.offset + datal > off + len)
3813 trim = key.offset + datal - (off + len);
3815 if (comp && (skip || trim)) {
3817 btrfs_end_transaction(trans, root);
3820 size -= skip + trim;
3821 datal -= skip + trim;
3823 ret = clone_copy_inline_extent(src, inode,
3830 if (ret != -EOPNOTSUPP)
3831 btrfs_abort_transaction(trans,
3833 btrfs_end_transaction(trans, root);
3836 leaf = path->nodes[0];
3837 slot = path->slots[0];
3840 /* If we have an implicit hole (NO_HOLES feature). */
3841 if (drop_start < new_key.offset)
3842 clone_update_extent_map(inode, trans,
3844 new_key.offset - drop_start);
3846 clone_update_extent_map(inode, trans, path, 0, 0);
3848 btrfs_mark_buffer_dirty(leaf);
3849 btrfs_release_path(path);
3851 last_dest_end = ALIGN(new_key.offset + datal,
3853 ret = clone_finish_inode_update(trans, inode,
3859 if (new_key.offset + datal >= destoff + len)
3862 btrfs_release_path(path);
3863 key.offset = next_key_min_offset;
3865 if (fatal_signal_pending(current)) {
3874 if (last_dest_end < destoff + len) {
3876 * We have an implicit hole (NO_HOLES feature is enabled) that
3877 * fully or partially overlaps our cloning range at its end.
3879 btrfs_release_path(path);
3882 * 1 - remove extent(s)
3885 trans = btrfs_start_transaction(root, 2);
3886 if (IS_ERR(trans)) {
3887 ret = PTR_ERR(trans);
3890 ret = btrfs_drop_extents(trans, root, inode,
3891 last_dest_end, destoff + len, 1);
3893 if (ret != -EOPNOTSUPP)
3894 btrfs_abort_transaction(trans, ret);
3895 btrfs_end_transaction(trans, root);
3898 clone_update_extent_map(inode, trans, NULL, last_dest_end,
3899 destoff + len - last_dest_end);
3900 ret = clone_finish_inode_update(trans, inode, destoff + len,
3901 destoff, olen, no_time_update);
3905 btrfs_free_path(path);
3910 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3911 u64 off, u64 olen, u64 destoff)
3913 struct inode *inode = file_inode(file);
3914 struct inode *src = file_inode(file_src);
3915 struct btrfs_root *root = BTRFS_I(inode)->root;
3918 u64 bs = root->fs_info->sb->s_blocksize;
3919 int same_inode = src == inode;
3923 * - split compressed inline extents. annoying: we need to
3924 * decompress into destination's address_space (the file offset
3925 * may change, so source mapping won't do), then recompress (or
3926 * otherwise reinsert) a subrange.
3928 * - split destination inode's inline extents. The inline extents can
3929 * be either compressed or non-compressed.
3932 if (btrfs_root_readonly(root))
3935 if (file_src->f_path.mnt != file->f_path.mnt ||
3936 src->i_sb != inode->i_sb)
3939 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3943 btrfs_double_inode_lock(src, inode);
3948 /* don't make the dst file partly checksummed */
3949 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3950 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
3955 /* determine range to clone */
3957 if (off + len > src->i_size || off + len < off)
3960 olen = len = src->i_size - off;
3962 * If we extend to eof, continue to block boundary if and only if the
3963 * destination end offset matches the destination file's size, otherwise
3964 * we would be corrupting data by placing the eof block into the middle of a file.
3967 if (off + len == src->i_size) {
3968 if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
3970 len = ALIGN(src->i_size, bs) - off;
3978 /* verify the end result is block aligned */
3979 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3980 !IS_ALIGNED(destoff, bs))
3983 /* verify that the ranges do not overlap when cloning within the same file */
3985 if (destoff + len > off && destoff < off + len)
3989 if (destoff > inode->i_size) {
3990 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3996 * Lock the target range too. Right after we replace the file extent
3997 * items in the fs tree (which now point to the cloned data), we might
3998 * have a worker replace them with extent items relative to a write
3999 * operation that was issued before this clone operation (see
4000 * inode.c:btrfs_finish_ordered_io).
4003 u64 lock_start = min_t(u64, off, destoff);
4004 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
4006 ret = lock_extent_range(src, lock_start, lock_len, true);
4008 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
4013 /* ranges in the io trees already unlocked */
4017 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
4020 u64 lock_start = min_t(u64, off, destoff);
4021 u64 lock_end = max_t(u64, off, destoff) + len - 1;
4023 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
4025 btrfs_double_extent_unlock(src, off, inode, destoff, len);
4028 * Truncate page cache pages so that future reads will see the cloned
4029 * data immediately and not the previous data.
4031 truncate_inode_pages_range(&inode->i_data,
4032 round_down(destoff, PAGE_SIZE),
4033 round_up(destoff + len, PAGE_SIZE) - 1);
4036 btrfs_double_inode_unlock(src, inode);
4042 ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
4043 struct file *file_out, loff_t pos_out,
4044 size_t len, unsigned int flags)
4048 ret = btrfs_clone_files(file_out, file_in, pos_in, len, pos_out);
4054 int btrfs_clone_file_range(struct file *src_file, loff_t off,
4055 struct file *dst_file, loff_t destoff, u64 len)
4057 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
4061 * there are many ways the trans_start and trans_end ioctls can lead
4062 * to deadlocks. They should only be used by applications that
4063 * basically own the machine, and have a very in-depth understanding
4064 * of all the possible deadlocks and enospc problems.
4066 static long btrfs_ioctl_trans_start(struct file *file)
4068 struct inode *inode = file_inode(file);
4069 struct btrfs_root *root = BTRFS_I(inode)->root;
4070 struct btrfs_trans_handle *trans;
4074 if (!capable(CAP_SYS_ADMIN))
4078 if (file->private_data)
4082 if (btrfs_root_readonly(root))
4085 ret = mnt_want_write_file(file);
4089 atomic_inc(&root->fs_info->open_ioctl_trans);
4092 trans = btrfs_start_ioctl_transaction(root);
4096 file->private_data = trans;
4100 atomic_dec(&root->fs_info->open_ioctl_trans);
4101 mnt_drop_write_file(file);
4106 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4108 struct inode *inode = file_inode(file);
4109 struct btrfs_root *root = BTRFS_I(inode)->root;
4110 struct btrfs_root *new_root;
4111 struct btrfs_dir_item *di;
4112 struct btrfs_trans_handle *trans;
4113 struct btrfs_path *path;
4114 struct btrfs_key location;
4115 struct btrfs_disk_key disk_key;
4120 if (!capable(CAP_SYS_ADMIN))
4123 ret = mnt_want_write_file(file);
4127 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4133 objectid = BTRFS_FS_TREE_OBJECTID;
4135 location.objectid = objectid;
4136 location.type = BTRFS_ROOT_ITEM_KEY;
4137 location.offset = (u64)-1;
4139 new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
4140 if (IS_ERR(new_root)) {
4141 ret = PTR_ERR(new_root);
4144 if (!is_fstree(new_root->objectid)) {
4149 path = btrfs_alloc_path();
4154 path->leave_spinning = 1;
4156 trans = btrfs_start_transaction(root, 1);
4157 if (IS_ERR(trans)) {
4158 btrfs_free_path(path);
4159 ret = PTR_ERR(trans);
4163 dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
4164 di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
4165 dir_id, "default", 7, 1);
4166 if (IS_ERR_OR_NULL(di)) {
4167 btrfs_free_path(path);
4168 btrfs_end_transaction(trans, root);
4169 btrfs_err(new_root->fs_info,
4170 "Umm, you don't have the default diritem, this isn't going to work");
4175 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4176 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4177 btrfs_mark_buffer_dirty(path->nodes[0]);
4178 btrfs_free_path(path);
4180 btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
4181 btrfs_end_transaction(trans, root);
4183 mnt_drop_write_file(file);
4187 void btrfs_get_block_group_info(struct list_head *groups_list,
4188 struct btrfs_ioctl_space_info *space)
4190 struct btrfs_block_group_cache *block_group;
4192 space->total_bytes = 0;
4193 space->used_bytes = 0;
4195 list_for_each_entry(block_group, groups_list, list) {
4196 space->flags = block_group->flags;
4197 space->total_bytes += block_group->key.offset;
4198 space->used_bytes +=
4199 btrfs_block_group_used(&block_group->item);
4203 static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
4205 struct btrfs_ioctl_space_args space_args;
4206 struct btrfs_ioctl_space_info space;
4207 struct btrfs_ioctl_space_info *dest;
4208 struct btrfs_ioctl_space_info *dest_orig;
4209 struct btrfs_ioctl_space_info __user *user_dest;
4210 struct btrfs_space_info *info;
4211 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
4212 BTRFS_BLOCK_GROUP_SYSTEM,
4213 BTRFS_BLOCK_GROUP_METADATA,
4214 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
4221 if (copy_from_user(&space_args,
4222 (struct btrfs_ioctl_space_args __user *)arg,
4223 sizeof(space_args)))
4226 for (i = 0; i < num_types; i++) {
4227 struct btrfs_space_info *tmp;
4231 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4233 if (tmp->flags == types[i]) {
4243 down_read(&info->groups_sem);
4244 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4245 if (!list_empty(&info->block_groups[c]))
4248 up_read(&info->groups_sem);
4252 * Global block reserve, exported as a space_info
4256 /* space_slots == 0 means they are asking for a count */
4257 if (space_args.space_slots == 0) {
4258 space_args.total_spaces = slot_count;
4262 slot_count = min_t(u64, space_args.space_slots, slot_count);
4264 alloc_size = sizeof(*dest) * slot_count;
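/*
 * Size sketch backing the comment below (illustrative, assuming 4 KiB
 * pages): each btrfs_ioctl_space_info is three u64 fields = 24 bytes, so
 * a single page holds roughly 170 slots.
 */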
4266 /* we generally have at most 6 or so space infos, one for each raid
4267 * level. So, a whole page should be more than enough for everyone
4269 if (alloc_size > PAGE_SIZE)
4272 space_args.total_spaces = 0;
4273 dest = kmalloc(alloc_size, GFP_KERNEL);
4278 /* now we have a buffer to copy into */
4279 for (i = 0; i < num_types; i++) {
4280 struct btrfs_space_info *tmp;
4287 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4289 if (tmp->flags == types[i]) {
4298 down_read(&info->groups_sem);
4299 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4300 if (!list_empty(&info->block_groups[c])) {
4301 btrfs_get_block_group_info(
4302 &info->block_groups[c], &space);
4303 memcpy(dest, &space, sizeof(space));
4305 space_args.total_spaces++;
4311 up_read(&info->groups_sem);
4315 * Add global block reserve
4318 struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
4320 spin_lock(&block_rsv->lock);
4321 space.total_bytes = block_rsv->size;
4322 space.used_bytes = block_rsv->size - block_rsv->reserved;
4323 spin_unlock(&block_rsv->lock);
4324 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4325 memcpy(dest, &space, sizeof(space));
4326 space_args.total_spaces++;
4329 user_dest = (struct btrfs_ioctl_space_info __user *)
4330 (arg + sizeof(struct btrfs_ioctl_space_args));
4332 if (copy_to_user(user_dest, dest_orig, alloc_size))
4337 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
4344 * there are many ways the trans_start and trans_end ioctls can lead
4345 * to deadlocks. They should only be used by applications that
4346 * basically own the machine, and have a very in-depth understanding
4347 * of all the possible deadlocks and enospc problems.
4349 long btrfs_ioctl_trans_end(struct file *file)
4351 struct inode *inode = file_inode(file);
4352 struct btrfs_root *root = BTRFS_I(inode)->root;
4353 struct btrfs_trans_handle *trans;
4355 trans = file->private_data;
4358 file->private_data = NULL;
4360 btrfs_end_transaction(trans, root);
4362 atomic_dec(&root->fs_info->open_ioctl_trans);
4364 mnt_drop_write_file(file);
4368 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4371 struct btrfs_trans_handle *trans;
4375 trans = btrfs_attach_transaction_barrier(root);
4376 if (IS_ERR(trans)) {
4377 if (PTR_ERR(trans) != -ENOENT)
4378 return PTR_ERR(trans);
4380 /* No running transaction, don't bother */
4381 transid = root->fs_info->last_trans_committed;
4384 transid = trans->transid;
4385 ret = btrfs_commit_transaction_async(trans, root, 0);
4387 btrfs_end_transaction(trans, root);
4392 if (copy_to_user(argp, &transid, sizeof(transid)))
4397 static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
4403 if (copy_from_user(&transid, argp, sizeof(transid)))
4406 transid = 0; /* current trans */
4408 return btrfs_wait_for_commit(root, transid);
4411 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4413 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4414 struct btrfs_ioctl_scrub_args *sa;
4417 if (!capable(CAP_SYS_ADMIN))
4420 sa = memdup_user(arg, sizeof(*sa));
4424 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4425 ret = mnt_want_write_file(file);
4430 ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
4431 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4434 if (copy_to_user(arg, sa, sizeof(*sa)))
4437 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4438 mnt_drop_write_file(file);
4444 static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
4446 if (!capable(CAP_SYS_ADMIN))
4449 return btrfs_scrub_cancel(root->fs_info);
4452 static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
4455 struct btrfs_ioctl_scrub_args *sa;
4458 if (!capable(CAP_SYS_ADMIN))
4461 sa = memdup_user(arg, sizeof(*sa));
4465 ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
4467 if (copy_to_user(arg, sa, sizeof(*sa)))
4474 static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
4477 struct btrfs_ioctl_get_dev_stats *sa;
4480 sa = memdup_user(arg, sizeof(*sa));
4484 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4489 ret = btrfs_get_dev_stats(root, sa);
4491 if (copy_to_user(arg, sa, sizeof(*sa)))
4498 static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
4500 struct btrfs_ioctl_dev_replace_args *p;
4503 if (!capable(CAP_SYS_ADMIN))
4506 p = memdup_user(arg, sizeof(*p));
4511 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4512 if (root->fs_info->sb->s_flags & MS_RDONLY) {
4517 &root->fs_info->mutually_exclusive_operation_running,
4519 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4521 ret = btrfs_dev_replace_by_ioctl(root, p);
4523 &root->fs_info->mutually_exclusive_operation_running,
4527 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4528 btrfs_dev_replace_status(root->fs_info, p);
4531 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4532 ret = btrfs_dev_replace_cancel(root->fs_info, p);
4539 if (copy_to_user(arg, p, sizeof(*p)))
4546 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4552 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4553 struct inode_fs_paths *ipath = NULL;
4554 struct btrfs_path *path;
4556 if (!capable(CAP_DAC_READ_SEARCH))
4559 path = btrfs_alloc_path();
4565 ipa = memdup_user(arg, sizeof(*ipa));
4572 size = min_t(u32, ipa->size, 4096);
4573 ipath = init_ipath(size, root, path);
4574 if (IS_ERR(ipath)) {
4575 ret = PTR_ERR(ipath);
4580 ret = paths_from_inode(ipa->inum, ipath);
4584 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4585 rel_ptr = ipath->fspath->val[i] -
4586 (u64)(unsigned long)ipath->fspath->val;
4587 ipath->fspath->val[i] = rel_ptr;
4590 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
4591 (void *)(unsigned long)ipath->fspath, size);
4598 btrfs_free_path(path);
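/*
 * Worked example for build_ino_list() below (illustrative): each record
 * is three u64s = 24 bytes. A container with bytes_left = 64 accepts two
 * full records (48 bytes); the third no longer fits, so bytes_missing
 * grows by 24 - 16 = 8 and elem_missed by 3.
 */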
4605 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4607 struct btrfs_data_container *inodes = ctx;
4608 const size_t c = 3 * sizeof(u64);
4610 if (inodes->bytes_left >= c) {
4611 inodes->bytes_left -= c;
4612 inodes->val[inodes->elem_cnt] = inum;
4613 inodes->val[inodes->elem_cnt + 1] = offset;
4614 inodes->val[inodes->elem_cnt + 2] = root;
4615 inodes->elem_cnt += 3;
4617 inodes->bytes_missing += c - inodes->bytes_left;
4618 inodes->bytes_left = 0;
4619 inodes->elem_missed += 3;
4625 static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
4630 struct btrfs_ioctl_logical_ino_args *loi;
4631 struct btrfs_data_container *inodes = NULL;
4632 struct btrfs_path *path = NULL;
4634 if (!capable(CAP_SYS_ADMIN))
4637 loi = memdup_user(arg, sizeof(*loi));
4644 path = btrfs_alloc_path();
4650 size = min_t(u32, loi->size, SZ_64K);
4651 inodes = init_data_container(size);
4652 if (IS_ERR(inodes)) {
4653 ret = PTR_ERR(inodes);
4658 ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
4659 build_ino_list, inodes);
4665 ret = copy_to_user((void *)(unsigned long)loi->inodes,
4666 (void *)(unsigned long)inodes, size);
4671 btrfs_free_path(path);
4678 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
4679 struct btrfs_ioctl_balance_args *bargs)
4681 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4683 bargs->flags = bctl->flags;
4685 if (atomic_read(&fs_info->balance_running))
4686 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4687 if (atomic_read(&fs_info->balance_pause_req))
4688 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4689 if (atomic_read(&fs_info->balance_cancel_req))
4690 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4692 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4693 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4694 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4697 spin_lock(&fs_info->balance_lock);
4698 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4699 spin_unlock(&fs_info->balance_lock);
4701 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4705 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4707 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4708 struct btrfs_fs_info *fs_info = root->fs_info;
4709 struct btrfs_ioctl_balance_args *bargs;
4710 struct btrfs_balance_control *bctl;
4711 bool need_unlock; /* for mut. excl. ops lock */
4714 if (!capable(CAP_SYS_ADMIN))
4717 ret = mnt_want_write_file(file);
4722 if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
4723 mutex_lock(&fs_info->volume_mutex);
4724 mutex_lock(&fs_info->balance_mutex);
4730 * mut. excl. ops lock is locked. Three possibilities:
4731 * (1) some other op is running
4732 * (2) balance is running
4733 * (3) balance is paused -- special case (think resume)
4735 mutex_lock(&fs_info->balance_mutex);
4736 if (fs_info->balance_ctl) {
4737 /* this is either (2) or (3) */
4738 if (!atomic_read(&fs_info->balance_running)) {
4739 mutex_unlock(&fs_info->balance_mutex);
4740 if (!mutex_trylock(&fs_info->volume_mutex))
4742 mutex_lock(&fs_info->balance_mutex);
4744 if (fs_info->balance_ctl &&
4745 !atomic_read(&fs_info->balance_running)) {
4747 need_unlock = false;
4751 mutex_unlock(&fs_info->balance_mutex);
4752 mutex_unlock(&fs_info->volume_mutex);
4756 mutex_unlock(&fs_info->balance_mutex);
4762 mutex_unlock(&fs_info->balance_mutex);
4763 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4768 BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
4771 bargs = memdup_user(arg, sizeof(*bargs));
4772 if (IS_ERR(bargs)) {
4773 ret = PTR_ERR(bargs);
4777 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4778 if (!fs_info->balance_ctl) {
4783 bctl = fs_info->balance_ctl;
4784 spin_lock(&fs_info->balance_lock);
4785 bctl->flags |= BTRFS_BALANCE_RESUME;
4786 spin_unlock(&fs_info->balance_lock);
4794 if (fs_info->balance_ctl) {
4799 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4805 bctl->fs_info = fs_info;
4807 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4808 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4809 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4811 bctl->flags = bargs->flags;
4813 /* balance everything - no filters */
4814 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4817 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4824 * Ownership of bctl and mutually_exclusive_operation_running
4825 * goes to btrfs_balance. bctl is freed in __cancel_balance,
4826 * or, if the restriper was paused all the way until unmount, in
4827 * free_fs_info. mutually_exclusive_operation_running is
4828 * cleared in __cancel_balance.
4830 need_unlock = false;
4832 ret = btrfs_balance(bctl, bargs);
4836 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4845 mutex_unlock(&fs_info->balance_mutex);
4846 mutex_unlock(&fs_info->volume_mutex);
4848 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
4850 mnt_drop_write_file(file);
4854 static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
4856 if (!capable(CAP_SYS_ADMIN))
4860 case BTRFS_BALANCE_CTL_PAUSE:
4861 return btrfs_pause_balance(root->fs_info);
4862 case BTRFS_BALANCE_CTL_CANCEL:
4863 return btrfs_cancel_balance(root->fs_info);
4869 static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
4872 struct btrfs_fs_info *fs_info = root->fs_info;
4873 struct btrfs_ioctl_balance_args *bargs;
4876 if (!capable(CAP_SYS_ADMIN))
4879 mutex_lock(&fs_info->balance_mutex);
4880 if (!fs_info->balance_ctl) {
4885 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4891 update_ioctl_balance_args(fs_info, 1, bargs);
4893 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4898 mutex_unlock(&fs_info->balance_mutex);
4902 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4904 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4905 struct btrfs_ioctl_quota_ctl_args *sa;
4906 struct btrfs_trans_handle *trans = NULL;
4910 if (!capable(CAP_SYS_ADMIN))
4913 ret = mnt_want_write_file(file);
4917 sa = memdup_user(arg, sizeof(*sa));
4923 down_write(&root->fs_info->subvol_sem);
4924 trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
4925 if (IS_ERR(trans)) {
4926 ret = PTR_ERR(trans);
4931 case BTRFS_QUOTA_CTL_ENABLE:
4932 ret = btrfs_quota_enable(trans, root->fs_info);
4934 case BTRFS_QUOTA_CTL_DISABLE:
4935 ret = btrfs_quota_disable(trans, root->fs_info);
4942 err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
4947 up_write(&root->fs_info->subvol_sem);
4949 mnt_drop_write_file(file);
4953 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4955 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4956 struct btrfs_ioctl_qgroup_assign_args *sa;
4957 struct btrfs_trans_handle *trans;
4961 if (!capable(CAP_SYS_ADMIN))
4964 ret = mnt_want_write_file(file);
4968 sa = memdup_user(arg, sizeof(*sa));
4974 trans = btrfs_join_transaction(root);
4975 if (IS_ERR(trans)) {
4976 ret = PTR_ERR(trans);
4980 /* FIXME: check if the IDs really exist */
4982 ret = btrfs_add_qgroup_relation(trans, root->fs_info,
4985 ret = btrfs_del_qgroup_relation(trans, root->fs_info,
4989 /* update qgroup status and info */
4990 err = btrfs_run_qgroups(trans, root->fs_info);
4992 btrfs_handle_fs_error(root->fs_info, err,
4993 "failed to update qgroup status and info");
4994 err = btrfs_end_transaction(trans, root);
5001 mnt_drop_write_file(file);
5005 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
5007 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5008 struct btrfs_ioctl_qgroup_create_args *sa;
5009 struct btrfs_trans_handle *trans;
5013 if (!capable(CAP_SYS_ADMIN))
5016 ret = mnt_want_write_file(file);
5020 sa = memdup_user(arg, sizeof(*sa));
5026 if (!sa->qgroupid) {
5031 trans = btrfs_join_transaction(root);
5032 if (IS_ERR(trans)) {
5033 ret = PTR_ERR(trans);
5037 /* FIXME: check if the IDs really exist */
5039 ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
5041 ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
5044 err = btrfs_end_transaction(trans, root);
5051 mnt_drop_write_file(file);
5055 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
5057 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5058 struct btrfs_ioctl_qgroup_limit_args *sa;
5059 struct btrfs_trans_handle *trans;
5064 if (!capable(CAP_SYS_ADMIN))
5067 ret = mnt_want_write_file(file);
5071 sa = memdup_user(arg, sizeof(*sa));
5077 trans = btrfs_join_transaction(root);
5078 if (IS_ERR(trans)) {
5079 ret = PTR_ERR(trans);
5083 qgroupid = sa->qgroupid;
5085 /* take the current subvol as qgroup */
5086 qgroupid = root->root_key.objectid;
5089 /* FIXME: check if the IDs really exist */
5090 ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
5092 err = btrfs_end_transaction(trans, root);
5099 mnt_drop_write_file(file);
5103 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5105 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5106 struct btrfs_ioctl_quota_rescan_args *qsa;
5109 if (!capable(CAP_SYS_ADMIN))
5112 ret = mnt_want_write_file(file);
5116 qsa = memdup_user(arg, sizeof(*qsa));
5127 ret = btrfs_qgroup_rescan(root->fs_info);
5132 mnt_drop_write_file(file);
5136 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5138 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5139 struct btrfs_ioctl_quota_rescan_args *qsa;
5142 if (!capable(CAP_SYS_ADMIN))
5145 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5149 if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5151 qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
5154 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5161 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5163 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5165 if (!capable(CAP_SYS_ADMIN))
5168 return btrfs_qgroup_wait_for_completion(root->fs_info, true);
5171 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5172 struct btrfs_ioctl_received_subvol_args *sa)
5174 struct inode *inode = file_inode(file);
5175 struct btrfs_root *root = BTRFS_I(inode)->root;
5176 struct btrfs_root_item *root_item = &root->root_item;
5177 struct btrfs_trans_handle *trans;
5178 struct timespec ct = current_time(inode);
5180 int received_uuid_changed;
5182 if (!inode_owner_or_capable(inode))
5185 ret = mnt_want_write_file(file);
5189 down_write(&root->fs_info->subvol_sem);
5191 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
5196 if (btrfs_root_readonly(root)) {
5203 * 2 - uuid items (received uuid + subvol uuid)
5205 trans = btrfs_start_transaction(root, 3);
5206 if (IS_ERR(trans)) {
5207 ret = PTR_ERR(trans);
5212 sa->rtransid = trans->transid;
5213 sa->rtime.sec = ct.tv_sec;
5214 sa->rtime.nsec = ct.tv_nsec;
5216 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5218 if (received_uuid_changed &&
5219 !btrfs_is_empty_uuid(root_item->received_uuid))
5220 btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
5221 root_item->received_uuid,
5222 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5223 root->root_key.objectid);
5224 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5225 btrfs_set_root_stransid(root_item, sa->stransid);
5226 btrfs_set_root_rtransid(root_item, sa->rtransid);
5227 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5228 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5229 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5230 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5232 ret = btrfs_update_root(trans, root->fs_info->tree_root,
5233 &root->root_key, &root->root_item);
5235 btrfs_end_transaction(trans, root);
5238 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5239 ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
5241 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5242 root->root_key.objectid);
5243 if (ret < 0 && ret != -EEXIST) {
5244 btrfs_abort_transaction(trans, ret);
5248 ret = btrfs_commit_transaction(trans, root);
5250 btrfs_abort_transaction(trans, ret);
5255 up_write(&root->fs_info->subvol_sem);
5256 mnt_drop_write_file(file);
5261 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5264 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5265 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5268 args32 = memdup_user(arg, sizeof(*args32));
5269 if (IS_ERR(args32)) {
5270 ret = PTR_ERR(args32);
5275 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
5281 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5282 args64->stransid = args32->stransid;
5283 args64->rtransid = args32->rtransid;
5284 args64->stime.sec = args32->stime.sec;
5285 args64->stime.nsec = args32->stime.nsec;
5286 args64->rtime.sec = args32->rtime.sec;
5287 args64->rtime.nsec = args32->rtime.nsec;
5288 args64->flags = args32->flags;
5290 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5294 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5295 args32->stransid = args64->stransid;
5296 args32->rtransid = args64->rtransid;
5297 args32->stime.sec = args64->stime.sec;
5298 args32->stime.nsec = args64->stime.nsec;
5299 args32->rtime.sec = args64->rtime.sec;
5300 args32->rtime.nsec = args64->rtime.nsec;
5301 args32->flags = args64->flags;
5303 ret = copy_to_user(arg, args32, sizeof(*args32));
5314 static long btrfs_ioctl_set_received_subvol(struct file *file,
5317 struct btrfs_ioctl_received_subvol_args *sa = NULL;
5320 sa = memdup_user(arg, sizeof(*sa));
5327 ret = _btrfs_ioctl_set_received_subvol(file, sa);
5332 ret = copy_to_user(arg, sa, sizeof(*sa));
5341 static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
5343 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5346 char label[BTRFS_LABEL_SIZE];
5348 spin_lock(&root->fs_info->super_lock);
5349 memcpy(label, root->fs_info->super_copy->label, BTRFS_LABEL_SIZE);
5350 spin_unlock(&root->fs_info->super_lock);
5352 len = strnlen(label, BTRFS_LABEL_SIZE);
5354 if (len == BTRFS_LABEL_SIZE) {
5355 btrfs_warn(root->fs_info,
5356 "label is too long, return the first %zu bytes", --len);
5359 ret = copy_to_user(arg, label, len);
5361 return ret ? -EFAULT : 0;
5364 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5366 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5367 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5368 struct btrfs_trans_handle *trans;
5369 char label[BTRFS_LABEL_SIZE];
5372 if (!capable(CAP_SYS_ADMIN))
5375 if (copy_from_user(label, arg, sizeof(label)))
5378 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5379 btrfs_err(root->fs_info,
5380 "unable to set label with more than %d bytes",
5381 BTRFS_LABEL_SIZE - 1);
5385 ret = mnt_want_write_file(file);
5389 trans = btrfs_start_transaction(root, 0);
5390 if (IS_ERR(trans)) {
5391 ret = PTR_ERR(trans);
5395 spin_lock(&root->fs_info->super_lock);
5396 strcpy(super_block->label, label);
5397 spin_unlock(&root->fs_info->super_lock);
5398 ret = btrfs_commit_transaction(trans, root);
5401 mnt_drop_write_file(file);
5405 #define INIT_FEATURE_FLAGS(suffix) \
5406 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5407 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5408 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
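/*
 * Illustrative expansion (not in the original source):
 * INIT_FEATURE_FLAGS(SUPP) becomes
 *
 *	{ .compat_flags    = BTRFS_FEATURE_COMPAT_SUPP,
 *	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_SUPP,
 *	  .incompat_flags  = BTRFS_FEATURE_INCOMPAT_SUPP }
 */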
5410 int btrfs_ioctl_get_supported_features(void __user *arg)
5412 static const struct btrfs_ioctl_feature_flags features[3] = {
5413 INIT_FEATURE_FLAGS(SUPP),
5414 INIT_FEATURE_FLAGS(SAFE_SET),
5415 INIT_FEATURE_FLAGS(SAFE_CLEAR)
5418 if (copy_to_user(arg, &features, sizeof(features)))
5424 static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
5426 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5427 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5428 struct btrfs_ioctl_feature_flags features;
5430 features.compat_flags = btrfs_super_compat_flags(super_block);
5431 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5432 features.incompat_flags = btrfs_super_incompat_flags(super_block);
5434 if (copy_to_user(arg, &features, sizeof(features)))
static int check_feature_bits(struct btrfs_root *root,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_names[set];
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(root->fs_info,
			   "this kernel does not support the %s feature bit%s",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(root->fs_info,
			   "this kernel does not support %s bits 0x%llx",
			   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(root->fs_info,
			   "can't set the %s feature bit%s while mounted",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(root->fs_info,
			   "can't set %s bits 0x%llx while mounted",
			   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(root->fs_info,
			   "can't clear the %s feature bit%s while mounted",
			   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(root->fs_info,
			   "can't clear %s bits 0x%llx while mounted",
			   type, disallowed);
		return -EPERM;
	}

	return 0;
}
#define check_feature(root, change_mask, flags, mask_base)	\
check_feature_bits(root, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
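/*
 * For reference, check_feature(root, mask, flags, COMPAT) expands to:
 *
 *	check_feature_bits(root, FEAT_COMPAT, mask, flags,
 *			   BTRFS_FEATURE_COMPAT_SUPP,
 *			   BTRFS_FEATURE_COMPAT_SAFE_SET,
 *			   BTRFS_FEATURE_COMPAT_SAFE_CLEAR)
 *
 * which keeps the three calls in btrfs_ioctl_set_features() from
 * repeating the per-set constant names.
 */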
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_super_block *super_block = root->fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do if the change mask is empty for all three sets. */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(root, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(root, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(root, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	spin_lock(&root->fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&root->fs_info->super_lock);

	ret = btrfs_commit_transaction(trans, root);

out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}
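/*
 * Minimal userspace sketch (not part of this kernel file): flags[0] is
 * the mask of bits to change and flags[1] supplies their new values.
 * BTRFS_FEATURE_INCOMPAT_FOO is a placeholder, not a real feature bit:
 *
 *	struct btrfs_ioctl_feature_flags flags[2];
 *
 *	memset(flags, 0, sizeof(flags));
 *	flags[0].incompat_flags = BTRFS_FEATURE_INCOMPAT_FOO;
 *	flags[1].incompat_flags = BTRFS_FEATURE_INCOMPAT_FOO;
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, flags);
 *
 * This sets the bit; clearing it would keep the same flags[0] mask but
 * leave flags[1].incompat_flags zero.
 */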
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(root, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(root, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(root, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TRANS_START:
		return btrfs_ioctl_trans_start(file);
	case BTRFS_IOC_TRANS_END:
		return btrfs_ioctl_trans_end(file);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(root, argp);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(root, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(root->fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(root, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(root, argp);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(root, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(root, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(root, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return btrfs_ioctl_send(file, argp);
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(root, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(root, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif