// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>
#include <linux/fsverity.h>

#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "print-tree.h"
#include "accessors.h"
#include "file-item.h"
#include "lru_cache.h"

/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with a large number of
 * references).
 */
#define SEND_MAX_EXTENT_REFS	1024

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))

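/*
 * Illustrative arithmetic (an assumption based on the padded layout above,
 * not an ABI guarantee): with the union padded to 256 bytes and inline_buf
 * starting right after two pointers, one pointer and a packed unsigned short
 * (26 bytes on x86_64), FS_PATH_INLINE_SIZE evaluates to 256 - 26 = 230
 * bytes of inline storage before any heap allocation is needed.
 */
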
/* Reused for each extent. */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;
	u64 num_bytes;
	bool found_ref;
};

#define SEND_MAX_NAME_CACHE_SIZE			256

/*
 * Limit the root_ids array of struct backref_cache_entry to 17 elements.
 * This makes the size of a cache entry exactly 192 bytes on x86_64, which
 * can be satisfied from the kmalloc-192 slab, without wasting any space.
 * The most common case is to have a single root for cloning, which corresponds
 * to the send root. Having the user specify more than 16 clone roots is not
 * common, and in such rare cases we simply don't use caching if the number of
 * cloning roots that lead down to a leaf is more than 17.
 */
#define SEND_MAX_BACKREF_CACHE_ROOTS	17

/*
 * Max number of entries in the cache.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, the size in bytes, excluding
 * the maple tree's internal nodes, is 24K.
 */
#define SEND_MAX_BACKREF_CACHE_SIZE 128

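/*
 * Worked example for the 24K figure above: 128 cache entries, each a
 * struct backref_cache_entry of 192 bytes, gives 128 * 192 = 24576 bytes.
 */
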
/*
 * A backref cache entry maps a leaf to a list of IDs of roots from which the
 * leaf is accessible and we can use for clone operations.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, each cache entry is 192 bytes (on
 * x86_64).
 */
struct backref_cache_entry {
	struct btrfs_lru_cache_entry entry;
	u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
	/* Number of valid elements in the root_ids array. */
	int num_roots;
};

/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
static_assert(offsetof(struct backref_cache_entry, entry) == 0);

/*
 * Max number of entries in the cache that stores directories that were already
 * created. The cache uses raw struct btrfs_lru_cache_entry entries, so it uses
 * at most 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but
 * the kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64).
 */
#define SEND_MAX_DIR_CREATED_CACHE_SIZE 64

/*
 * Max number of entries in the cache that stores directories for which we
 * still have to send a utimes update (see cache_dir_utimes() below). Same
 * sizing as the created-directories cache above: raw struct
 * btrfs_lru_cache_entry entries from the kmalloc-64 slab, so at most
 * 4096 bytes (64 bytes * 64).
 */
#define SEND_MAX_DIR_UTIMES_CACHE_SIZE 64

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	/*
	 * Whether BTRFS_SEND_A_DATA attribute was already added to current
	 * command (since protocol v2, data must be the last attribute).
	 */
	bool put_data;
	struct page **send_buf_pages;
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	/* Protocol version compatibility requested */
	u32 proto;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	u64 clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Keep track of the generation of the last transaction that was used
	 * for relocating a block group. This is periodically checked in order
	 * to detect if a relocation happened since the last check, so that we
	 * don't operate on stale extent buffers for nodes (level >= 1) or on
	 * stale disk_bytenr values of file extent items.
	 */
	u64 last_reloc_trans;

	/*
	 * infos of the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool cur_inode_new;
	bool cur_inode_new_gen;
	bool cur_inode_deleted;
	bool ignore_cur_inode;
	bool cur_inode_needs_verity;
	void *verity_descriptor;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct btrfs_lru_cache name_cache;

	/*
	 * The inode we are currently processing. It's not NULL only when we
	 * need to issue write commands for data extents from this inode.
	 */
	struct inode *cur_inode;
	struct file_ra_state ra;
	u64 page_cache_clear_start;
	bool clean_page_cache;

	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c                (ino 259)
	 *         |   |-- d            (ino 260)
	 *         |
	 *         |-- c2               (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c2               (ino 261)
	 *             |-- d2           (ino 260)
	 *                 |-- cc       (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;

	struct rb_root rbtree_new_refs;
	struct rb_root rbtree_deleted_refs;

	struct btrfs_lru_cache backref_cache;
	u64 backref_cache_last_reloc_trans;

	struct btrfs_lru_cache dir_created_cache;
	struct btrfs_lru_cache dir_utimes_cache;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	u64 rmdir_gen;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
	u64 dir_high_seq_ino;
};

struct name_cache_entry {
	/*
	 * The key in the entry is an inode number, and the generation matches
	 * the inode's generation.
	 */
	struct btrfs_lru_cache_entry entry;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
static_assert(offsetof(struct name_cache_entry, entry) == 0);

#define ADVANCE_ONLY_NEXT	-1

enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};

__maybe_unused
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
	switch (sctx->proto) {
	case 1:	 return cmd <= BTRFS_SEND_C_MAX_V1;
	case 2:	 return cmd <= BTRFS_SEND_C_MAX_V2;
	case 3:	 return cmd <= BTRFS_SEND_C_MAX_V3;
	default: return false;
	}
}

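/*
 * For context (protocol history, not derivable from this file alone):
 * protocol v2 extended the command range with, among others, encoded writes
 * and fileattr (see send_fileattr() below, which bails out when
 * sctx->proto < 2), and v3 extended it again for fs-verity support.
 */
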
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * Allocate to the next largest kmalloc bucket size, to let
	 * the fast path happen most of the time.
	 */
	len = kmalloc_size_roundup(len);
	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	p->buf_len = len;

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;
	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	p->reversed = from->reversed;
	fs_path_reset(p);

	return fs_path_add_path(p, from);
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (WARN_ON_ONCE(sctx->put_data))
		return -EINVAL;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	put_unaligned_le16(attr, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(8)
TLV_PUT_DEFINE_INT(32)
TLV_PUT_DEFINE_INT(64)

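/*
 * For reference, TLV_PUT_DEFINE_INT(64) above expands to:
 *
 *	static int tlv_put_u64(struct send_ctx *sctx,
 *			       u64 attr, u64 value)
 *	{
 *		__le64 __tmp = cpu_to_le64(value);
 *		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));
 *	}
 *
 * i.e. every integer attribute is converted to little endian before being
 * appended to the command buffer.
 */
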
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(sctx->proto);
	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le16(cmd, &hdr->cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
	put_unaligned_le32(0, &hdr->crc);

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	put_unaligned_le32(crc, &hdr->crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->send_size = 0;
	sctx->put_data = false;

	return ret;
}

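/*
 * Putting the pieces above together, the wire format produced by this file
 * is: one btrfs_stream_header (magic + version, see send_header()), followed
 * by a sequence of commands, each a btrfs_cmd_header whose len field counts
 * only the TLV payload and whose crc is a crc32c over the whole command
 * computed with the crc field zeroed (see send_cmd()).
 */
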
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

struct btrfs_inode_info {
	u64 size;
	u64 gen;
	u64 mode;
	u64 uid;
	u64 gid;
	u64 rdev;
	u64 fileattr;
	u64 nlink;
};

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root, u64 ino,
			  struct btrfs_inode_info *info)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	if (!info)
		goto out;

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	info->size = btrfs_inode_size(path->nodes[0], ii);
	info->gen = btrfs_inode_generation(path->nodes[0], ii);
	info->mode = btrfs_inode_mode(path->nodes[0], ii);
	info->uid = btrfs_inode_uid(path->nodes[0], ii);
	info->gid = btrfs_inode_gid(path->nodes[0], ii);
	info->rdev = btrfs_inode_rdev(path->nodes[0], ii);
	info->nlink = btrfs_inode_nlink(path->nodes[0], ii);
	/*
	 * Transfer the unchanged u64 value of btrfs_inode_item::flags, that's
	 * otherwise logically split to 32/32 parts.
	 */
	info->fileattr = btrfs_inode_flags(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}

static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
{
	int ret;
	struct btrfs_inode_info info = { 0 };

	ASSERT(gen);

	ret = get_inode_info(root, ino, &info);
	*gen = info.gen;
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				if (unlikely(start < p->buf)) {
					btrfs_err(root->fs_info,
			"send: path ref buffer underflow for key (%llu %u %llu)",
						  found_key->objectid,
						  found_key->type,
						  found_key->offset);
					ret = -EINVAL;
					goto out;
				}
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, slot);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 1;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* The bytenr the file extent item we are processing refers to. */
	u64 bytenr;
	/* The owner (root id) of the data backref for the current extent. */
	u64 backref_owner;
	/* The offset of the data backref for the current extent. */
	u64 backref_offset;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	const struct clone_root *cr = elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	const struct clone_root *cr1 = e1;
	const struct clone_root *cr2 = e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset.
 */
static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id,
			    void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *clone_root;

	/* First check if the root is in the list of accepted clone sources */
	clone_root = bsearch((void *)(uintptr_t)root_id, bctx->sctx->clone_roots,
			     bctx->sctx->clone_roots_cnt,
			     sizeof(struct clone_root),
			     __clone_root_cmp_bsearch);
	if (!clone_root)
		return 0;

	/* This is our own reference, bail out as we can't clone from it. */
	if (clone_root->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (clone_root->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	clone_root->found_ref = true;

	/*
	 * If the given backref refers to a file extent item with a larger
	 * number of bytes than what we found before, use the new one so that
	 * we clone more optimally and end up doing less writes and getting
	 * less exclusive, non-shared extents at the destination.
	 */
	if (num_bytes > clone_root->num_bytes) {
		clone_root->ino = ino;
		clone_root->offset = offset;
		clone_root->num_bytes = num_bytes;

		/*
		 * Found a perfect candidate, so there's no need to continue
		 * backref walking.
		 */
		if (num_bytes >= bctx->extent_len)
			return BTRFS_ITERATE_EXTENT_INODES_STOP;
	}

	return 0;
}

static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
				 const u64 **root_ids_ret, int *root_count_ret)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
	struct btrfs_lru_cache_entry *raw_entry;
	struct backref_cache_entry *entry;

	if (btrfs_lru_cache_size(&sctx->backref_cache) == 0)
		return false;

	/*
	 * If relocation happened since we first filled the cache, then we must
	 * empty the cache and can not use it, because even though we operate on
	 * read-only roots, their leaves and nodes may have been reallocated and
	 * now be used for different nodes/leaves of the same tree or some other
	 * tree.
	 *
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
		btrfs_lru_cache_clear(&sctx->backref_cache);
		return false;
	}

	raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key, 0);
	if (!raw_entry)
		return false;

	entry = container_of(raw_entry, struct backref_cache_entry, entry);
	*root_ids_ret = entry->root_ids;
	*root_count_ret = entry->num_roots;

	return true;
}

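/*
 * Note on the cache key used above: leaf_bytenr is shifted right by
 * fs_info->sectorsize_bits, so with a 4K sector size the key is simply
 * leaf_bytenr / 4096 - e.g. a leaf at byte 1064960 maps to key 260.
 */
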
static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
				void *ctx)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	struct backref_cache_entry *new_entry;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	/*
	 * We're called while holding a transaction handle or while holding
	 * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a
	 * NOFS allocation.
	 */
	new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS);
	/* No worries, cache is optional. */
	if (!new_entry)
		return;

	new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
	new_entry->entry.gen = 0;
	new_entry->num_roots = 0;
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(root_ids, &uiter)) != NULL) {
		const u64 root_id = node->val;
		struct clone_root *root;

		root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
			       sctx->clone_roots_cnt, sizeof(struct clone_root),
			       __clone_root_cmp_bsearch);
		if (!root)
			continue;

		/* Too many roots, just exit, no worries as caching is optional. */
		if (new_entry->num_roots >= SEND_MAX_BACKREF_CACHE_ROOTS) {
			kfree(new_entry);
			return;
		}

		new_entry->root_ids[new_entry->num_roots] = root_id;
		new_entry->num_roots++;
	}

	/*
	 * We may have not added any roots to the new cache entry, which means
	 * none of the roots is part of the list of roots from which we are
	 * allowed to clone. Cache the new entry as it's still useful to avoid
	 * backref walking to determine which roots have a path to the leaf.
	 *
	 * Also use GFP_NOFS because we're called while holding a transaction
	 * handle or while holding fs_info->commit_root_sem.
	 */
	ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
				    GFP_NOFS);
	ASSERT(ret == 0 || ret == -ENOMEM);
	if (ret) {
		/* Caching is optional, no worries. */
		kfree(new_entry);
		return;
	}

	/*
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (btrfs_lru_cache_size(&sctx->backref_cache) == 1)
		sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
}

static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
			     const struct extent_buffer *leaf, void *ctx)
{
	const u64 refs = btrfs_extent_refs(leaf, ei);
	const struct backref_ctx *bctx = ctx;
	const struct send_ctx *sctx = bctx->sctx;

	if (bytenr == bctx->bytenr) {
		const u64 flags = btrfs_extent_flags(leaf, ei);

		if (WARN_ON(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
			return -EUCLEAN;

		/*
		 * If we have only one reference and only the send root as a
		 * clone source - meaning no clone roots were given in the
		 * struct btrfs_ioctl_send_args passed to the send ioctl - then
		 * it's our reference and there's no point in doing backref
		 * walking which is expensive, so exit early.
		 */
		if (refs == 1 && sctx->clone_roots_cnt == 1)
			return -ENOENT;
	}

	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fallback to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (refs > SEND_MAX_EXTENT_REFS)
		return -ENOENT;

	return 0;
}

static bool skip_self_data_ref(u64 root, u64 ino, u64 offset, void *ctx)
{
	const struct backref_ctx *bctx = ctx;

	if (ino == bctx->cur_objectid &&
	    root == bctx->backref_owner &&
	    offset == bctx->backref_offset)
		return true;

	return false;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx = { 0 };
	struct btrfs_backref_walk_ctx backref_walk_ctx = { 0 };
	struct clone_root *cur_clone_root;
	int compressed;
	u64 i;

	/*
	 * With fallocate we can get prealloc extents beyond the inode's i_size,
	 * so we don't do anything here because clone operations can not clone
	 * to a range beyond i_size without increasing the i_size of the
	 * destination inode.
	 */
	if (data_offset >= ino_size)
		return 0;

	fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		return -ENOENT;

	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0)
		return -ENOENT;

	compressed = btrfs_file_extent_compression(eb, fi);
	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->num_bytes = 0;
		cur_clone_root->found_ref = false;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.bytenr = disk_byte;
	/*
	 * Use the header owner and not the send root's id, because in case of a
	 * snapshot we can have shared subtrees.
	 */
	backref_ctx.backref_owner = btrfs_header_owner(eb);
	backref_ctx.backref_offset = data_offset - btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * iterate_backrefs() work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;
	else
		backref_ctx.extent_len = num_bytes;

	/*
	 * Now collect all backrefs.
	 */
	backref_walk_ctx.bytenr = disk_byte;
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_walk_ctx.extent_item_pos = btrfs_file_extent_offset(eb, fi);
	backref_walk_ctx.fs_info = fs_info;
	backref_walk_ctx.cache_lookup = lookup_backref_cache;
	backref_walk_ctx.cache_store = store_backref_cache;
	backref_walk_ctx.indirect_ref_iterator = iterate_backrefs;
	backref_walk_ctx.check_extent_item = check_extent_item;
	backref_walk_ctx.user_ctx = &backref_ctx;

	/*
	 * If have a single clone root, then it's the send root and we can tell
	 * the backref walking code to skip our own backref and not resolve it,
	 * since we can not use it for cloning - the source and destination
	 * ranges can't overlap and in case the leaf is shared through a subtree
	 * due to snapshots, we can't use those other roots since they are not
	 * in the list of clone roots.
	 */
	if (sctx->clone_roots_cnt == 1)
		backref_walk_ctx.skip_data_ref = skip_self_data_ref;

	ret = iterate_extent_inodes(&backref_walk_ctx, true, iterate_backrefs,
				    &backref_ctx);
	if (ret < 0)
		return ret;

	down_read(&fs_info->commit_root_sem);
	if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
		/*
		 * A transaction commit for a transaction in which block group
		 * relocation was done just happened.
		 * The disk_bytenr of the file extent item we processed is
		 * possibly stale, referring to the extent's location before
		 * relocation. So act as if we haven't found any clone sources
		 * and fallback to write commands, which will read the correct
		 * data from the new extent location. Otherwise we will fail
		 * below because we haven't found our own back reference or we
		 * could be getting incorrect sources in case the old extent
		 * was already reallocated after the relocation.
		 */
		up_read(&fs_info->commit_root_sem);
		return -ENOENT;
	}
	up_read(&fs_info->commit_root_sem);

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found) {
		btrfs_debug(fs_info, "no clones found");
		return -ENOENT;
	}

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		struct clone_root *clone_root = &sctx->clone_roots[i];

		if (!clone_root->found_ref)
			continue;

		/*
		 * Choose the root from which we can clone more bytes, to
		 * minimize write operations and therefore have more extent
		 * sharing at the destination (the same as in the source).
		 */
		if (!cur_clone_root ||
		    clone_root->num_bytes > cur_clone_root->num_bytes) {
			cur_clone_root = clone_root;

			/*
			 * We found an optimal clone candidate (any inode from
			 * any root is fine), so we're done.
			 */
			if (clone_root->num_bytes >= backref_ctx.extent_len)
				break;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvolume was snapshotted in
		 * between). Print an informative message to dmesg/syslog so
		 * that the user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (unlikely(type != BTRFS_FILE_EXTENT_INLINE)) {
		ret = -EUCLEAN;
		btrfs_crit(root->fs_info,
"send: found symlink extent that is not inline, ino %llu root %llu extent type %d",
			   ino, btrfs_root_id(root), type);
		goto out;
	}
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	if (unlikely(compression != BTRFS_COMPRESS_NONE)) {
		ret = -EUCLEAN;
		btrfs_crit(root->fs_info,
"send: found symlink extent with compression, ino %llu root %llu compression type %d",
			   ino, btrfs_root_id(root), compression);
		goto out;
	}

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		struct fscrypt_str tmp_name;

		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));
		tmp_name.name = tmp;
		tmp_name.len = strlen(tmp);

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				&tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				&tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

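/*
 * Example of the names generated above: for inode 261 with generation 7,
 * the candidates tried are "o261-7-0", "o261-7-1", ... until one is free
 * in both the send root and the parent root.
 */
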
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
			       u64 *send_gen, u64 *parent_gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen = 0;
	struct btrfs_inode_info info;

	ret = get_inode_info(sctx->send_root, ino, &info);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = (info.nlink == 0) ? -ENOENT : ret;
	left_gen = info.gen;
	if (send_gen)
		*send_gen = ((left_ret == -ENOENT) ? 0 : info.gen);

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, &info);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = (info.nlink == 0) ? -ENOENT : ret;
		right_gen = info.gen;
		if (parent_gen)
			*parent_gen = ((right_ret == -ENOENT) ? 0 : info.gen);
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen,
			     u64 *send_gen, u64 *parent_gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen, send_gen, parent_gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;

		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;

		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_gen(root, parent_dir, dir_gen);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret;
	u64 parent_root_dir_gen;
	u64 other_inode = 0;
	struct btrfs_inode_info info;

	if (!sctx->parent_root)
		return 0;

	ret = is_inode_existent(sctx, dir, dir_gen, NULL, &parent_root_dir_gen);
	if (ret <= 0)
		return 0;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 *
	 * @parent_root_dir_gen was set to 0 if the inode does not exist in the
	 * parent root.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID &&
	    parent_root_dir_gen != dir_gen)
		return 0;

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode);
	if (ret == -ENOENT)
		return 0;
	else if (ret < 0)
		return ret;

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, &info);
		if (ret < 0)
			return ret;

		*who_ino = other_inode;
		*who_gen = info.gen;
		*who_mode = info.mode;
		return 1;
	}

	return 0;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret;
	u64 ow_inode;
	u64 ow_gen = 0;
	u64 send_root_dir_gen;

	if (!sctx->parent_root)
		return 0;

	ret = is_inode_existent(sctx, dir, dir_gen, &send_root_dir_gen, NULL);
	if (ret <= 0)
		return ret;

	/*
	 * @send_root_dir_gen was set to 0 if the inode does not exist in the
	 * send root.
	 */
	if (dir != BTRFS_FIRST_FREE_OBJECTID && send_root_dir_gen != dir_gen)
		return 0;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode);
	if (ret == -ENOENT) {
		/* was never and will never be overwritten */
		return 0;
	} else if (ret < 0) {
		return ret;
	}

	if (ow_inode == ino) {
		ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen);
		if (ret < 0)
			return ret;

		/* It's the same inode, so no overwrite happened. */
		if (ow_gen == ino_gen)
			return 0;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if (ow_inode < sctx->send_progress)
		return 1;

	if (ino != sctx->cur_ino && ow_inode == sctx->cur_ino) {
		if (ow_gen == 0) {
			ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen);
			if (ret < 0)
				return ret;
		}
		if (ow_gen == sctx->cur_inode_gen)
			return 1;
	}

	return 0;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

static inline struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
							 u64 ino, u64 gen)
{
	struct btrfs_lru_cache_entry *entry;

	entry = btrfs_lru_cache_lookup(&sctx->name_cache, ino, gen);
	if (!entry)
		return NULL;

	return container_of(entry, struct name_cache_entry, entry);
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			btrfs_lru_cache_remove(&sctx->name_cache, &nce->entry);
			nce = NULL;
		} else {
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * record_new_ref_if_needed().
	 */
	ret = is_inode_existent(sctx, ino, gen, NULL, NULL);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->entry.key = ino;
	nce->entry.gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = btrfs_lru_cache_store(&sctx->name_cache, &nce->entry, GFP_KERNEL);
	if (nce_ret < 0) {
		kfree(nce);
		ret = nce_ret;
	}

out:
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    it.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino, gen)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}

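/*
 * Illustrative walk (hypothetical path and inode numbers): for a file at
 * /a/b/c, the loop above first adds "c", then "b", then "a" to the reversed
 * fs_path while walking up the parent chain, and fs_path_unreverse() then
 * turns the accumulated buffer into "a/b/c".
 */
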
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->root_key.objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->root_key.objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);

	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.uuid);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    btrfs_root_ctransid(&sctx->send_root->root_item));
	if (parent_root) {
		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.received_uuid);
		else
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    btrfs_root_ctransid(&sctx->parent_root->root_item));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}

static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	if (sctx->proto < 2)
		return -EOPNOTSUPP;

	btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
		    ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

2722 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2724 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2726 struct fs_path *p = NULL;
2727 struct btrfs_inode_item *ii;
2728 struct btrfs_path *path = NULL;
2729 struct extent_buffer *eb;
2730 struct btrfs_key key;
2733 btrfs_debug(fs_info, "send_utimes %llu", ino);
2735 p = fs_path_alloc();
2739 path = alloc_path_for_send();
2746 key.type = BTRFS_INODE_ITEM_KEY;
2748 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2754 eb = path->nodes[0];
2755 slot = path->slots[0];
2756 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2758 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2762 ret = get_cur_path(sctx, ino, gen, p);
2765 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2766 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2767 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2768 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2769 if (sctx->proto >= 2)
2770 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime);
2772 ret = send_cmd(sctx);
2777 btrfs_free_path(path);
2782 * If the cache is full, we can't remove entries from it and do a call to
2783 * send_utimes() for each respective inode, because we might be finishing
2784 * processing an inode that is a directory that just got renamed, and existing
2785 * entries in the cache may refer to inodes that have the directory in their
2786 * full path - in which case we would generate outdated paths (pre-rename)
2787 * for the inodes that the cache entries point to. Instead of pruning the
2788 * cache when inserting, do it after we finish processing each inode at
2789 * finish_inode_if_needed().
2791 static int cache_dir_utimes(struct send_ctx *sctx, u64 dir, u64 gen)
2793 struct btrfs_lru_cache_entry *entry;
2796 entry = btrfs_lru_cache_lookup(&sctx->dir_utimes_cache, dir, gen);
2800 /* Caching is optional, don't fail if we can't allocate memory. */
2801 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2803 return send_utimes(sctx, dir, gen);
2808 ret = btrfs_lru_cache_store(&sctx->dir_utimes_cache, entry, GFP_KERNEL);
2809 ASSERT(ret != -EEXIST);
2812 return send_utimes(sctx, dir, gen);
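/*
 * Example of the hazard described above (hypothetical inode numbers):
 * suppose "a/b" (dir 260) is in the utimes cache and we are finishing
 * inode 259, which renames "a" to "x". If we flushed the entry for 260
 * while inserting into a full cache, we would emit a utimes command for
 * the stale path "a/b" instead of "x/b". Deferring the flush to
 * finish_inode_if_needed() ensures paths are computed after the rename.
 */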
2818 static int trim_dir_utimes_cache(struct send_ctx *sctx)
2820 while (btrfs_lru_cache_size(&sctx->dir_utimes_cache) >
2821 SEND_MAX_DIR_UTIMES_CACHE_SIZE) {
2822 struct btrfs_lru_cache_entry *lru;
2825 lru = btrfs_lru_cache_lru_entry(&sctx->dir_utimes_cache);
2826 ASSERT(lru != NULL);
2828 ret = send_utimes(sctx, lru->key, lru->gen);
2832 btrfs_lru_cache_remove(&sctx->dir_utimes_cache, lru);
2839 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2840 * a valid path yet because we did not process the refs yet. So, the inode
2841 * is created as an orphan.
2843 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2845 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2849 struct btrfs_inode_info info;
2854 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2856 p = fs_path_alloc();
2860 if (ino != sctx->cur_ino) {
2861 ret = get_inode_info(sctx->send_root, ino, &info);
2868 gen = sctx->cur_inode_gen;
2869 mode = sctx->cur_inode_mode;
2870 rdev = sctx->cur_inode_rdev;
2873 if (S_ISREG(mode)) {
2874 cmd = BTRFS_SEND_C_MKFILE;
2875 } else if (S_ISDIR(mode)) {
2876 cmd = BTRFS_SEND_C_MKDIR;
2877 } else if (S_ISLNK(mode)) {
2878 cmd = BTRFS_SEND_C_SYMLINK;
2879 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2880 cmd = BTRFS_SEND_C_MKNOD;
2881 } else if (S_ISFIFO(mode)) {
2882 cmd = BTRFS_SEND_C_MKFIFO;
2883 } else if (S_ISSOCK(mode)) {
2884 cmd = BTRFS_SEND_C_MKSOCK;
2886 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2887 (int)(mode & S_IFMT));
2892 ret = begin_cmd(sctx, cmd);
2896 ret = gen_unique_name(sctx, ino, gen, p);
2900 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2901 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2903 if (S_ISLNK(mode)) {
2905 ret = read_symlink(sctx->send_root, ino, p);
2908 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2909 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2910 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2911 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2912 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2915 ret = send_cmd(sctx);
2926 static void cache_dir_created(struct send_ctx *sctx, u64 dir)
2928 struct btrfs_lru_cache_entry *entry;
2931 /* Caching is optional, ignore any failures. */
2932 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2938 ret = btrfs_lru_cache_store(&sctx->dir_created_cache, entry, GFP_KERNEL);
2944 * We need some special handling for inodes that get processed before the parent
2945 * directory got created. See process_recorded_refs for details.
2946 * This function checks whether we already created the dir out of order.
2948 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2952 struct btrfs_path *path = NULL;
2953 struct btrfs_key key;
2954 struct btrfs_key found_key;
2955 struct btrfs_key di_key;
2956 struct btrfs_dir_item *di;
2958 if (btrfs_lru_cache_lookup(&sctx->dir_created_cache, dir, 0))
2961 path = alloc_path_for_send();
2966 key.type = BTRFS_DIR_INDEX_KEY;
2969 btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) {
2970 struct extent_buffer *eb = path->nodes[0];
2972 if (found_key.objectid != key.objectid ||
2973 found_key.type != key.type) {
2978 di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item);
2979 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2981 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2982 di_key.objectid < sctx->send_progress) {
2984 cache_dir_created(sctx, dir);
2988 /* Catch error found during iteration */
2992 btrfs_free_path(path);
2997 * Only creates the inode if it is:
2998 * 1. Not a directory
2999 * 2. Or a directory which was not already created due to out of order
3000 * directories. See did_create_dir, process_recorded_refs and the example after this function.
3002 static int send_create_inode_if_needed(struct send_ctx *sctx)
3006 if (S_ISDIR(sctx->cur_inode_mode)) {
3007 ret = did_create_dir(sctx, sctx->cur_ino);
3014 ret = send_create_inode(sctx, sctx->cur_ino);
3016 if (ret == 0 && S_ISDIR(sctx->cur_inode_mode))
3017 cache_dir_created(sctx, sctx->cur_ino);
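/*
 * Out-of-order example (hypothetical inode numbers): a file with inode 258
 * living inside a directory with inode 259 gets processed first, so
 * process_recorded_refs() creates directory 259 early to have a parent for
 * 258. When inode 259 itself is processed later, did_create_dir() (or the
 * dir_created_cache) reports it as already existing and the MKDIR is
 * skipped here.
 */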
3022 struct recorded_ref {
3023 struct list_head list;
3025 struct fs_path *full_path;
3029 struct rb_node node;
3030 struct rb_root *root;
3033 static struct recorded_ref *recorded_ref_alloc(void)
3035 struct recorded_ref *ref;
3037 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3040 RB_CLEAR_NODE(&ref->node);
3041 INIT_LIST_HEAD(&ref->list);
3045 static void recorded_ref_free(struct recorded_ref *ref)
3049 if (!RB_EMPTY_NODE(&ref->node))
3050 rb_erase(&ref->node, ref->root);
3051 list_del(&ref->list);
3052 fs_path_free(ref->full_path);
3056 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
3058 ref->full_path = path;
3059 ref->name = (char *)kbasename(ref->full_path->start);
3060 ref->name_len = ref->full_path->end - ref->name;
3063 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
3065 struct recorded_ref *new;
3067 new = recorded_ref_alloc();
3071 new->dir = ref->dir;
3072 new->dir_gen = ref->dir_gen;
3073 list_add_tail(&new->list, list);
3077 static void __free_recorded_refs(struct list_head *head)
3079 struct recorded_ref *cur;
3081 while (!list_empty(head)) {
3082 cur = list_entry(head->next, struct recorded_ref, list);
3083 recorded_ref_free(cur);
3087 static void free_recorded_refs(struct send_ctx *sctx)
3089 __free_recorded_refs(&sctx->new_refs);
3090 __free_recorded_refs(&sctx->deleted_refs);
3094 * Renames/moves a file/dir to its orphan name. Used when the first
3095 * ref of an unprocessed inode gets overwritten and for all non-empty
3098 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
3099 struct fs_path *path)
3102 struct fs_path *orphan;
3104 orphan = fs_path_alloc();
3108 ret = gen_unique_name(sctx, ino, gen, orphan);
3112 ret = send_rename(sctx, path, orphan);
3115 fs_path_free(orphan);
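/*
 * For illustration: gen_unique_name() produces names of the form
 * "o<ino>-<gen>-<idx>" (e.g. "o259-6-0", as seen in the comment before
 * refresh_ref_path() below), so orphanizing inode 259 with generation 6
 * amounts to a single rename from its current path to that unique name at
 * the top of the subvolume. The inode is renamed back to a real name once
 * it is processed.
 */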
3119 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
3120 u64 dir_ino, u64 dir_gen)
3122 struct rb_node **p = &sctx->orphan_dirs.rb_node;
3123 struct rb_node *parent = NULL;
3124 struct orphan_dir_info *entry, *odi;
3128 entry = rb_entry(parent, struct orphan_dir_info, node);
3129 if (dir_ino < entry->ino)
3131 else if (dir_ino > entry->ino)
3132 p = &(*p)->rb_right;
3133 else if (dir_gen < entry->gen)
3135 else if (dir_gen > entry->gen)
3136 p = &(*p)->rb_right;
3141 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
3143 return ERR_PTR(-ENOMEM);
3146 odi->last_dir_index_offset = 0;
3147 odi->dir_high_seq_ino = 0;
3149 rb_link_node(&odi->node, parent, p);
3150 rb_insert_color(&odi->node, &sctx->orphan_dirs);
3154 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
3155 u64 dir_ino, u64 gen)
3157 struct rb_node *n = sctx->orphan_dirs.rb_node;
3158 struct orphan_dir_info *entry;
3161 entry = rb_entry(n, struct orphan_dir_info, node);
3162 if (dir_ino < entry->ino)
3164 else if (dir_ino > entry->ino)
3166 else if (gen < entry->gen)
3168 else if (gen > entry->gen)
3176 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
3178 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
3183 static void free_orphan_dir_info(struct send_ctx *sctx,
3184 struct orphan_dir_info *odi)
3188 rb_erase(&odi->node, &sctx->orphan_dirs);
3193 * Returns 1 if a directory can be removed at this point in time.
3194 * We check this by iterating over all dir index items and checking if the inode behind
3195 * the dir item was already processed.
3197 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen)
3201 struct btrfs_root *root = sctx->parent_root;
3202 struct btrfs_path *path;
3203 struct btrfs_key key;
3204 struct btrfs_key found_key;
3205 struct btrfs_key loc;
3206 struct btrfs_dir_item *di;
3207 struct orphan_dir_info *odi = NULL;
3208 u64 dir_high_seq_ino = 0;
3209 u64 last_dir_index_offset = 0;
3212 * Don't try to rmdir the top/root subvolume dir.
3214 if (dir == BTRFS_FIRST_FREE_OBJECTID)
3217 odi = get_orphan_dir_info(sctx, dir, dir_gen);
3218 if (odi && sctx->cur_ino < odi->dir_high_seq_ino)
3221 path = alloc_path_for_send();
3227 * Find the inode number associated with the last dir index
3228 * entry. This is very likely the inode with the highest number
3229 * of all inodes that have an entry in the directory. We can
3230 * then use it so that future calls to can_rmdir(), when
3231 * processing inodes with a lower number, can avoid searching
3232 * the parent root's b+tree for dir index keys.
3235 key.type = BTRFS_DIR_INDEX_KEY;
3236 key.offset = (u64)-1;
3238 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3241 } else if (ret > 0) {
3242 /* Can't happen, the root is never empty. */
3243 ASSERT(path->slots[0] > 0);
3244 if (WARN_ON(path->slots[0] == 0)) {
3251 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3252 if (key.objectid != dir || key.type != BTRFS_DIR_INDEX_KEY) {
3253 /* No index keys, dir can be removed. */
3258 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3259 struct btrfs_dir_item);
3260 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3261 dir_high_seq_ino = loc.objectid;
3262 if (sctx->cur_ino < dir_high_seq_ino) {
3267 btrfs_release_path(path);
3271 key.type = BTRFS_DIR_INDEX_KEY;
3272 key.offset = (odi ? odi->last_dir_index_offset : 0);
3274 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
3275 struct waiting_dir_move *dm;
3277 if (found_key.objectid != key.objectid ||
3278 found_key.type != key.type)
3281 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3282 struct btrfs_dir_item);
3283 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3285 dir_high_seq_ino = max(dir_high_seq_ino, loc.objectid);
3286 last_dir_index_offset = found_key.offset;
3288 dm = get_waiting_dir_move(sctx, loc.objectid);
3290 dm->rmdir_ino = dir;
3291 dm->rmdir_gen = dir_gen;
3296 if (loc.objectid > sctx->cur_ino) {
3305 free_orphan_dir_info(sctx, odi);
3310 btrfs_free_path(path);
3316 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3318 return PTR_ERR(odi);
3323 odi->last_dir_index_offset = last_dir_index_offset;
3324 odi->dir_high_seq_ino = max(odi->dir_high_seq_ino, dir_high_seq_ino);
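/*
 * A sketch of can_rmdir()'s contract: it returns 1 when every inode that
 * still has a dir index entry in the directory (in the parent snapshot)
 * was already processed, 0 when some entry belongs to an inode numbered
 * above sctx->cur_ino - in which case an orphan_dir_info is recorded so
 * the rmdir can be retried cheaply later, resuming from
 * last_dir_index_offset - and < 0 on error. dir_high_seq_ino lets later
 * calls for lower-numbered inodes bail out without searching the b+tree
 * again.
 */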
3329 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3331 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3333 return entry != NULL;
3336 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3338 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3339 struct rb_node *parent = NULL;
3340 struct waiting_dir_move *entry, *dm;
3342 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3348 dm->orphanized = orphanized;
3352 entry = rb_entry(parent, struct waiting_dir_move, node);
3353 if (ino < entry->ino) {
3355 } else if (ino > entry->ino) {
3356 p = &(*p)->rb_right;
3363 rb_link_node(&dm->node, parent, p);
3364 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3368 static struct waiting_dir_move *
3369 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3371 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3372 struct waiting_dir_move *entry;
3375 entry = rb_entry(n, struct waiting_dir_move, node);
3376 if (ino < entry->ino)
3378 else if (ino > entry->ino)
3386 static void free_waiting_dir_move(struct send_ctx *sctx,
3387 struct waiting_dir_move *dm)
3391 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3395 static int add_pending_dir_move(struct send_ctx *sctx,
3399 struct list_head *new_refs,
3400 struct list_head *deleted_refs,
3401 const bool is_orphan)
3403 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3404 struct rb_node *parent = NULL;
3405 struct pending_dir_move *entry = NULL, *pm;
3406 struct recorded_ref *cur;
3410 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3413 pm->parent_ino = parent_ino;
3416 INIT_LIST_HEAD(&pm->list);
3417 INIT_LIST_HEAD(&pm->update_refs);
3418 RB_CLEAR_NODE(&pm->node);
3422 entry = rb_entry(parent, struct pending_dir_move, node);
3423 if (parent_ino < entry->parent_ino) {
3425 } else if (parent_ino > entry->parent_ino) {
3426 p = &(*p)->rb_right;
3433 list_for_each_entry(cur, deleted_refs, list) {
3434 ret = dup_ref(cur, &pm->update_refs);
3438 list_for_each_entry(cur, new_refs, list) {
3439 ret = dup_ref(cur, &pm->update_refs);
3444 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3449 list_add_tail(&pm->list, &entry->list);
3451 rb_link_node(&pm->node, parent, p);
3452 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3457 __free_recorded_refs(&pm->update_refs);
3463 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3466 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3467 struct pending_dir_move *entry;
3470 entry = rb_entry(n, struct pending_dir_move, node);
3471 if (parent_ino < entry->parent_ino)
3473 else if (parent_ino > entry->parent_ino)
3481 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3482 u64 ino, u64 gen, u64 *ancestor_ino)
3485 u64 parent_inode = 0;
3487 u64 start_ino = ino;
3490 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3491 fs_path_reset(name);
3493 if (is_waiting_for_rm(sctx, ino, gen))
3495 if (is_waiting_for_move(sctx, ino)) {
3496 if (*ancestor_ino == 0)
3497 *ancestor_ino = ino;
3498 ret = get_first_ref(sctx->parent_root, ino,
3499 &parent_inode, &parent_gen, name);
3501 ret = __get_cur_name_and_parent(sctx, ino, gen,
3511 if (parent_inode == start_ino) {
3513 if (*ancestor_ino == 0)
3514 *ancestor_ino = ino;
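/*
 * Example of the cycle path_loop() detects (hypothetical inodes): if 258
 * is pending a move under 259 while 259 is pending a move under 258,
 * walking up the parents from 258 eventually revisits the starting inode
 * (parent_inode == start_ino). The caller then re-queues the move under
 * the reported ancestor instead of recursing into an endless path build.
 */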
3523 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3525 struct fs_path *from_path = NULL;
3526 struct fs_path *to_path = NULL;
3527 struct fs_path *name = NULL;
3528 u64 orig_progress = sctx->send_progress;
3529 struct recorded_ref *cur;
3530 u64 parent_ino, parent_gen;
3531 struct waiting_dir_move *dm = NULL;
3538 name = fs_path_alloc();
3539 from_path = fs_path_alloc();
3540 if (!name || !from_path) {
3545 dm = get_waiting_dir_move(sctx, pm->ino);
3547 rmdir_ino = dm->rmdir_ino;
3548 rmdir_gen = dm->rmdir_gen;
3549 is_orphan = dm->orphanized;
3550 free_waiting_dir_move(sctx, dm);
3553 ret = gen_unique_name(sctx, pm->ino,
3554 pm->gen, from_path);
3556 ret = get_first_ref(sctx->parent_root, pm->ino,
3557 &parent_ino, &parent_gen, name);
3560 ret = get_cur_path(sctx, parent_ino, parent_gen,
3564 ret = fs_path_add_path(from_path, name);
3569 sctx->send_progress = sctx->cur_ino + 1;
3570 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3574 LIST_HEAD(deleted_refs);
3575 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3576 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3577 &pm->update_refs, &deleted_refs,
3582 dm = get_waiting_dir_move(sctx, pm->ino);
3584 dm->rmdir_ino = rmdir_ino;
3585 dm->rmdir_gen = rmdir_gen;
3589 fs_path_reset(name);
3592 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3596 ret = send_rename(sctx, from_path, to_path);
3601 struct orphan_dir_info *odi;
3604 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3606 /* already deleted */
3611 ret = can_rmdir(sctx, rmdir_ino, gen);
3617 name = fs_path_alloc();
3622 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3625 ret = send_rmdir(sctx, name);
3631 ret = cache_dir_utimes(sctx, pm->ino, pm->gen);
3636 * After rename/move, need to update the utimes of both new parent(s)
3637 * and old parent(s).
3639 list_for_each_entry(cur, &pm->update_refs, list) {
3641 * The parent inode might have been deleted in the send snapshot
3643 ret = get_inode_info(sctx->send_root, cur->dir, NULL);
3644 if (ret == -ENOENT) {
3651 ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
3658 fs_path_free(from_path);
3659 fs_path_free(to_path);
3660 sctx->send_progress = orig_progress;
3665 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3667 if (!list_empty(&m->list))
3669 if (!RB_EMPTY_NODE(&m->node))
3670 rb_erase(&m->node, &sctx->pending_dir_moves);
3671 __free_recorded_refs(&m->update_refs);
3675 static void tail_append_pending_moves(struct send_ctx *sctx,
3676 struct pending_dir_move *moves,
3677 struct list_head *stack)
3679 if (list_empty(&moves->list)) {
3680 list_add_tail(&moves->list, stack);
3683 list_splice_init(&moves->list, &list);
3684 list_add_tail(&moves->list, stack);
3685 list_splice_tail(&list, stack);
3687 if (!RB_EMPTY_NODE(&moves->node)) {
3688 rb_erase(&moves->node, &sctx->pending_dir_moves);
3689 RB_CLEAR_NODE(&moves->node);
3693 static int apply_children_dir_moves(struct send_ctx *sctx)
3695 struct pending_dir_move *pm;
3697 u64 parent_ino = sctx->cur_ino;
3700 pm = get_pending_dir_moves(sctx, parent_ino);
3704 tail_append_pending_moves(sctx, pm, &stack);
3706 while (!list_empty(&stack)) {
3707 pm = list_first_entry(&stack, struct pending_dir_move, list);
3708 parent_ino = pm->ino;
3709 ret = apply_dir_move(sctx, pm);
3710 free_pending_move(sctx, pm);
3713 pm = get_pending_dir_moves(sctx, parent_ino);
3715 tail_append_pending_moves(sctx, pm, &stack);
3720 while (!list_empty(&stack)) {
3721 pm = list_first_entry(&stack, struct pending_dir_move, list);
3722 free_pending_move(sctx, pm);
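/*
 * Note the iterative traversal above: pending moves form a tree (applying
 * one move can unblock moves of its children), and
 * tail_append_pending_moves() splices each newly unblocked batch onto an
 * explicit stack list. This avoids recursion, whose depth would otherwise
 * be bounded only by the depth of the directory hierarchy.
 */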
3728 * We might need to delay a directory rename even when no ancestor directory
3729 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3730 * renamed. This happens when we rename a directory to the old name (the name
3731 * in the parent root) of some other unrelated directory that got its rename
3732 * delayed due to some ancestor with higher number that got renamed.
3738 * |---- a/ (ino 257)
3739 * | |---- file (ino 260)
3741 * |---- b/ (ino 258)
3742 * |---- c/ (ino 259)
3746 * |---- a/ (ino 258)
3747 * |---- x/ (ino 259)
3748 * |---- y/ (ino 257)
3749 * |----- file (ino 260)
3751 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3752 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3753 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3756 * 1 - rename 259 from 'c' to 'x'
3757 * 2 - rename 257 from 'a' to 'x/y'
3758 * 3 - rename 258 from 'b' to 'a'
3760 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3761 * be done right away and < 0 on error.
3763 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3764 struct recorded_ref *parent_ref,
3765 const bool is_orphan)
3767 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3768 struct btrfs_path *path;
3769 struct btrfs_key key;
3770 struct btrfs_key di_key;
3771 struct btrfs_dir_item *di;
3775 struct waiting_dir_move *wdm;
3777 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3780 path = alloc_path_for_send();
3784 key.objectid = parent_ref->dir;
3785 key.type = BTRFS_DIR_ITEM_KEY;
3786 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3788 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3791 } else if (ret > 0) {
3796 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3797 parent_ref->name_len);
3803 * di_key.objectid has the number of the inode that has a dentry in the
3804 * parent directory with the same name that sctx->cur_ino is being
3805 * renamed to. We need to check if that inode is in the send root as
3806 * well and if it is currently marked as an inode with a pending rename,
3807 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3808 * that it happens after that other inode is renamed.
3810 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3811 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3816 ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen);
3819 ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen);
3826 /* Different inode, no need to delay the rename of sctx->cur_ino */
3827 if (right_gen != left_gen) {
3832 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3833 if (wdm && !wdm->orphanized) {
3834 ret = add_pending_dir_move(sctx,
3836 sctx->cur_inode_gen,
3839 &sctx->deleted_refs,
3845 btrfs_free_path(path);
3850 * Check if inode ino2, or any of its ancestors, is inode ino1.
3851 * Return 1 if true, 0 if false and < 0 on error.
3853 static int check_ino_in_path(struct btrfs_root *root,
3858 struct fs_path *fs_path)
3863 return ino1_gen == ino2_gen;
3865 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3870 fs_path_reset(fs_path);
3871 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3875 return parent_gen == ino1_gen;
3882 * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
3883 * possible path (in case ino2 is not a directory and has multiple hard links).
3884 * Return 1 if true, 0 if false and < 0 on error.
3886 static int is_ancestor(struct btrfs_root *root,
3890 struct fs_path *fs_path)
3892 bool free_fs_path = false;
3895 struct btrfs_path *path = NULL;
3896 struct btrfs_key key;
3899 fs_path = fs_path_alloc();
3902 free_fs_path = true;
3905 path = alloc_path_for_send();
3911 key.objectid = ino2;
3912 key.type = BTRFS_INODE_REF_KEY;
3915 btrfs_for_each_slot(root, &key, &key, path, iter_ret) {
3916 struct extent_buffer *leaf = path->nodes[0];
3917 int slot = path->slots[0];
3921 if (key.objectid != ino2)
3923 if (key.type != BTRFS_INODE_REF_KEY &&
3924 key.type != BTRFS_INODE_EXTREF_KEY)
3927 item_size = btrfs_item_size(leaf, slot);
3928 while (cur_offset < item_size) {
3932 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3934 struct btrfs_inode_extref *extref;
3936 ptr = btrfs_item_ptr_offset(leaf, slot);
3937 extref = (struct btrfs_inode_extref *)
3939 parent = btrfs_inode_extref_parent(leaf,
3941 cur_offset += sizeof(*extref);
3942 cur_offset += btrfs_inode_extref_name_len(leaf,
3945 parent = key.offset;
3946 cur_offset = item_size;
3949 ret = get_inode_gen(root, parent, &parent_gen);
3952 ret = check_ino_in_path(root, ino1, ino1_gen,
3953 parent, parent_gen, fs_path);
3963 btrfs_free_path(path);
3965 fs_path_free(fs_path);
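/*
 * Why every ref is walked above: a non-directory ino2 may have several
 * hard links and therefore several parent chains. For example, links at
 * "a/x" and "b/x" require checking both the chain through "a" and the one
 * through "b"; ino1 is an ancestor if any chain reaches it (a positive
 * result from check_ino_in_path() ends the iteration).
 */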
3969 static int wait_for_parent_move(struct send_ctx *sctx,
3970 struct recorded_ref *parent_ref,
3971 const bool is_orphan)
3974 u64 ino = parent_ref->dir;
3975 u64 ino_gen = parent_ref->dir_gen;
3976 u64 parent_ino_before, parent_ino_after;
3977 struct fs_path *path_before = NULL;
3978 struct fs_path *path_after = NULL;
3981 path_after = fs_path_alloc();
3982 path_before = fs_path_alloc();
3983 if (!path_after || !path_before) {
3989 * Our current directory inode may not yet be renamed/moved because some
3991 * ancestor (immediate or not) has to be renamed/moved first. So find out if
3992 * such an ancestor exists and make sure our own rename/move happens after
3992 * that ancestor is processed to avoid path build infinite loops (done
3993 * at get_cur_path()).
3995 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3996 u64 parent_ino_after_gen;
3998 if (is_waiting_for_move(sctx, ino)) {
4000 * If the current inode is an ancestor of ino in the
4001 * parent root, we need to delay the rename of the
4002 * current inode, otherwise don't delay the rename
4003 * because we can end up with a circular dependency
4004 * of renames, resulting in some directories never
4005 * getting the respective rename operations issued in
4006 * the send stream or getting into infinite path build
4009 ret = is_ancestor(sctx->parent_root,
4010 sctx->cur_ino, sctx->cur_inode_gen,
4016 fs_path_reset(path_before);
4017 fs_path_reset(path_after);
4019 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
4020 &parent_ino_after_gen, path_after);
4023 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
4025 if (ret < 0 && ret != -ENOENT) {
4027 } else if (ret == -ENOENT) {
4032 len1 = fs_path_len(path_before);
4033 len2 = fs_path_len(path_after);
4034 if (ino > sctx->cur_ino &&
4035 (parent_ino_before != parent_ino_after || len1 != len2 ||
4036 memcmp(path_before->start, path_after->start, len1))) {
4039 ret = get_inode_gen(sctx->parent_root, ino, &parent_ino_gen);
4042 if (ino_gen == parent_ino_gen) {
4047 ino = parent_ino_after;
4048 ino_gen = parent_ino_after_gen;
4052 fs_path_free(path_before);
4053 fs_path_free(path_after);
4056 ret = add_pending_dir_move(sctx,
4058 sctx->cur_inode_gen,
4061 &sctx->deleted_refs,
4070 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
4073 struct fs_path *new_path;
4076 * Our reference's name member points to its full_path member string, so
4077 * we use a new path here.
4079 new_path = fs_path_alloc();
4083 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
4085 fs_path_free(new_path);
4088 ret = fs_path_add(new_path, ref->name, ref->name_len);
4090 fs_path_free(new_path);
4094 fs_path_free(ref->full_path);
4095 set_ref_path(ref, new_path);
4101 * When processing the new references for an inode we may orphanize an existing
4102 * directory inode because its old name conflicts with one of the new references
4103 * of the current inode. Later, when processing another new reference of our
4104 * inode, we might need to orphanize another inode, but the path we have in the
4105 * reference reflects the pre-orphanization name of the directory we previously
4106 * orphanized. For example:
4108 * parent snapshot looks like:
4111 * |----- f1 (ino 257)
4112 * |----- f2 (ino 258)
4113 * |----- d1/ (ino 259)
4114 * |----- d2/ (ino 260)
4116 * send snapshot looks like:
4119 * |----- d1 (ino 258)
4120 * |----- f2/ (ino 259)
4121 * |----- f2_link/ (ino 260)
4122 * | |----- f1 (ino 257)
4124 * |----- d2 (ino 258)
4126 * When processing inode 257 we compute the name for inode 259 as "d1", and we
4127 * cache it in the name cache. Later when we start processing inode 258, when
4128 * collecting all its new references we set a full path of "d1/d2" for its new
4129 * reference with name "d2". When we start processing the new references we
4130 * start by processing the new reference with name "d1", and this results in
4131 * orphanizing inode 259, since its old reference causes a conflict. Then we
4132 * move on to the next new reference, with name "d2", and we find out we must
4133 * orphanize inode 260, as its old reference conflicts with ours - but for the
4134 * orphanization we use a source path corresponding to the path we stored in the
4135 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
4136 * receiver fail since the path component "d1/" no longer exists, it was renamed
4137 * to "o259-6-0/" when processing the previous new reference. So in this case we
4138 * must recompute the path in the new reference and use it for the new
4139 * orphanization operation.
4141 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
4146 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
4150 fs_path_reset(ref->full_path);
4151 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
4155 ret = fs_path_add(ref->full_path, name, ref->name_len);
4159 /* Update the reference's base name pointer. */
4160 set_ref_path(ref, ref->full_path);
4167 * This does all the move/link/unlink/rmdir magic.
4169 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
4171 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4173 struct recorded_ref *cur;
4174 struct recorded_ref *cur2;
4175 LIST_HEAD(check_dirs);
4176 struct fs_path *valid_path = NULL;
4180 int did_overwrite = 0;
4182 u64 last_dir_ino_rm = 0;
4183 bool can_rename = true;
4184 bool orphanized_dir = false;
4185 bool orphanized_ancestor = false;
4187 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
4190 * This should never happen as the root dir always has the same ref
4191 * which is always '..'
4193 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
4195 valid_path = fs_path_alloc();
4202 * First, check if the first ref of the current inode was overwritten
4203 * before. If yes, we know that the current inode was already orphanized
4204 * and thus use the orphan name. If not, we can use get_cur_path to
4205 * get the path of the first ref as it would look like while receiving at
4206 * this point in time.
4207 * New inodes are always orphans at the beginning, so force the use of the
4208 * orphan name in this case.
4209 * The first ref is stored in valid_path and will be updated if it
4210 * gets moved around.
4212 if (!sctx->cur_inode_new) {
4213 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
4214 sctx->cur_inode_gen);
4220 if (sctx->cur_inode_new || did_overwrite) {
4221 ret = gen_unique_name(sctx, sctx->cur_ino,
4222 sctx->cur_inode_gen, valid_path);
4227 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4234 * Before doing any rename and link operations, do a first pass on the
4235 * new references to orphanize any unprocessed inodes that may have a
4236 * reference that conflicts with one of the new references of the current
4237 * inode. This needs to happen first because a new reference may conflict
4238 * with the old reference of a parent directory, so we must make sure
4239 * that the paths used for link and rename commands don't use an
4240 * orphanized name when an ancestor was not yet orphanized.
4247 * |----- testdir/ (ino 259)
4248 * | |----- a (ino 257)
4250 * |----- b (ino 258)
4255 * |----- testdir_2/ (ino 259)
4256 * | |----- a (ino 260)
4258 * |----- testdir (ino 257)
4259 * |----- b (ino 257)
4260 * |----- b2 (ino 258)
4262 * Processing the new reference for inode 257 with name "b" may happen
4263 * before processing the new reference with name "testdir". If so, we
4264 * must make sure that by the time we send a link command to create the
4265 * hard link "b", inode 259 was already orphanized, since the generated
4266 * path in "valid_path" already contains the orphanized name for 259.
4267 * We are processing inode 257, so only later when processing 259 we do
4268 * the rename operation to change its temporary (orphanized) name to
4271 list_for_each_entry(cur, &sctx->new_refs, list) {
4272 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
4275 if (ret == inode_state_will_create)
4279 * Check if this new ref would overwrite the first ref of another
4280 * unprocessed inode. If yes, orphanize the overwritten inode.
4281 * If we find an overwritten ref that is not the first ref,
4284 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4285 cur->name, cur->name_len,
4286 &ow_inode, &ow_gen, &ow_mode);
4290 ret = is_first_ref(sctx->parent_root,
4291 ow_inode, cur->dir, cur->name,
4296 struct name_cache_entry *nce;
4297 struct waiting_dir_move *wdm;
4299 if (orphanized_dir) {
4300 ret = refresh_ref_path(sctx, cur);
4305 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4309 if (S_ISDIR(ow_mode))
4310 orphanized_dir = true;
4313 * If ow_inode has its rename operation delayed
4314 * make sure that its orphanized name is used in
4315 * the source path when performing its rename
4318 wdm = get_waiting_dir_move(sctx, ow_inode);
4320 wdm->orphanized = true;
4323 * Make sure we clear our orphanized inode's
4324 * name from the name cache. This is because the
4325 * inode ow_inode might be an ancestor of some
4326 * other inode that will be orphanized as well
4327 * later and has an inode number greater than
4328 * sctx->send_progress. We need to prevent
4329 * future name lookups from using the old name
4330 * and get instead the orphan name.
4332 nce = name_cache_search(sctx, ow_inode, ow_gen);
4334 btrfs_lru_cache_remove(&sctx->name_cache,
4338 * ow_inode might currently be an ancestor of
4339 * cur_ino, therefore compute valid_path (the
4340 * current path of cur_ino) again because it
4341 * might contain the pre-orphanization name of
4342 * ow_inode, which is no longer valid.
4344 ret = is_ancestor(sctx->parent_root,
4346 sctx->cur_ino, NULL);
4348 orphanized_ancestor = true;
4349 fs_path_reset(valid_path);
4350 ret = get_cur_path(sctx, sctx->cur_ino,
4351 sctx->cur_inode_gen,
4358 * If we previously orphanized a directory that
4359 * collided with a new reference that we already
4360 * processed, recompute the current path because
4361 * that directory may be part of the path.
4363 if (orphanized_dir) {
4364 ret = refresh_ref_path(sctx, cur);
4368 ret = send_unlink(sctx, cur->full_path);
4376 list_for_each_entry(cur, &sctx->new_refs, list) {
4378 * We may have refs where the parent directory does not exist
4379 * yet. This happens if the parent directory's inum is higher
4380 * than the current inum. To handle this case, we create the
4381 * parent directory out of order. But we need to check if this
4382 * did already happen before due to other refs in the same dir.
4384 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
4387 if (ret == inode_state_will_create) {
4390 * First check if any of the current inodes refs did
4391 * already create the dir.
4393 list_for_each_entry(cur2, &sctx->new_refs, list) {
4396 if (cur2->dir == cur->dir) {
4403 * If that did not happen, check if a previous inode
4404 * did already create the dir.
4407 ret = did_create_dir(sctx, cur->dir);
4411 ret = send_create_inode(sctx, cur->dir);
4414 cache_dir_created(sctx, cur->dir);
4418 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4419 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4428 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4430 ret = wait_for_parent_move(sctx, cur, is_orphan);
4440 * link/move the ref to the new place. If we have an orphan
4441 * inode, move it and update valid_path. If not, link or move
4442 * it depending on the inode mode.
4444 if (is_orphan && can_rename) {
4445 ret = send_rename(sctx, valid_path, cur->full_path);
4449 ret = fs_path_copy(valid_path, cur->full_path);
4452 } else if (can_rename) {
4453 if (S_ISDIR(sctx->cur_inode_mode)) {
4455 * Dirs can't be linked, so move it. For moved
4456 * dirs, we always have one new and one deleted
4457 * ref. The deleted ref is ignored later.
4459 ret = send_rename(sctx, valid_path,
4462 ret = fs_path_copy(valid_path,
4468 * We might have previously orphanized an inode
4469 * which is an ancestor of our current inode,
4470 * so our reference's full path, which was
4471 * computed before any such orphanizations, must
4474 if (orphanized_dir) {
4475 ret = update_ref_path(sctx, cur);
4479 ret = send_link(sctx, cur->full_path,
4485 ret = dup_ref(cur, &check_dirs);
4490 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4492 * Check if we can already rmdir the directory. If not,
4493 * orphanize it. For every dir item inside that gets deleted
4494 * later, we do this check again and rmdir it then if possible.
4495 * See the use of check_dirs for more details.
4497 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen);
4501 ret = send_rmdir(sctx, valid_path);
4504 } else if (!is_orphan) {
4505 ret = orphanize_inode(sctx, sctx->cur_ino,
4506 sctx->cur_inode_gen, valid_path);
4512 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4513 ret = dup_ref(cur, &check_dirs);
4517 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4518 !list_empty(&sctx->deleted_refs)) {
4520 * We have a moved dir. Add the old parent to check_dirs
4522 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4524 ret = dup_ref(cur, &check_dirs);
4527 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4529 * We have a non-dir inode. Go through all deleted refs and
4530 * unlink them if they were not already overwritten by other
4533 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4534 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4535 sctx->cur_ino, sctx->cur_inode_gen,
4536 cur->name, cur->name_len);
4541 * If we orphanized any ancestor before, we need
4542 * to recompute the full path for deleted names,
4543 * since any such path was computed before we
4544 * processed any references and orphanized any
4547 if (orphanized_ancestor) {
4548 ret = update_ref_path(sctx, cur);
4552 ret = send_unlink(sctx, cur->full_path);
4556 ret = dup_ref(cur, &check_dirs);
4561 * If the inode is still an orphan, unlink the orphan. This may
4562 * happen when a previous inode did overwrite the first ref
4563 * of this inode and no new refs were added for the current
4564 * inode. Unlinking does not mean that the inode is deleted in
4565 * all cases. There may still be links to this inode in other
4569 ret = send_unlink(sctx, valid_path);
4576 * We have collected all parent dirs where cur_inode was once located. We
4577 * now go through all these dirs and check if they are pending for
4578 * deletion and if it's finally possible to perform the rmdir now.
4579 * We also update the inode stats of the parent dirs here.
4581 list_for_each_entry(cur, &check_dirs, list) {
4583 * In case we had refs into dirs that were not processed yet,
4584 * we don't need to do the utime and rmdir logic for these dirs.
4585 * The dir will be processed later.
4587 if (cur->dir > sctx->cur_ino)
4590 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
4594 if (ret == inode_state_did_create ||
4595 ret == inode_state_no_change) {
4596 ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
4599 } else if (ret == inode_state_did_delete &&
4600 cur->dir != last_dir_ino_rm) {
4601 ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
4605 ret = get_cur_path(sctx, cur->dir,
4606 cur->dir_gen, valid_path);
4609 ret = send_rmdir(sctx, valid_path);
4612 last_dir_ino_rm = cur->dir;
4620 __free_recorded_refs(&check_dirs);
4621 free_recorded_refs(sctx);
4622 fs_path_free(valid_path);
4626 static int rbtree_ref_comp(const void *k, const struct rb_node *node)
4628 const struct recorded_ref *data = k;
4629 const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
4632 if (data->dir > ref->dir)
4634 if (data->dir < ref->dir)
4636 if (data->dir_gen > ref->dir_gen)
4638 if (data->dir_gen < ref->dir_gen)
4640 if (data->name_len > ref->name_len)
4642 if (data->name_len < ref->name_len)
4644 result = strcmp(data->name, ref->name);
4652 static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
4654 const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
4656 return rbtree_ref_comp(entry, parent) < 0;
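/*
 * Usage sketch for the comparators above: lookups build a stack-allocated
 * struct recorded_ref holding only the key fields (dir, dir_gen, name and
 * name_len) and pass it to rb_find() with rbtree_ref_comp(), while
 * insertions use rb_add() with rbtree_ref_less() - see
 * record_new_ref_if_needed() and record_ref_in_tree() below.
 */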
4659 static int record_ref_in_tree(struct rb_root *root, struct list_head *refs,
4660 struct fs_path *name, u64 dir, u64 dir_gen,
4661 struct send_ctx *sctx)
4664 struct fs_path *path = NULL;
4665 struct recorded_ref *ref = NULL;
4667 path = fs_path_alloc();
4673 ref = recorded_ref_alloc();
4679 ret = get_cur_path(sctx, dir, dir_gen, path);
4682 ret = fs_path_add_path(path, name);
4687 ref->dir_gen = dir_gen;
4688 set_ref_path(ref, path);
4689 list_add_tail(&ref->list, refs);
4690 rb_add(&ref->node, root, rbtree_ref_less);
4694 if (path && (!ref || !ref->full_path))
4696 recorded_ref_free(ref);
4701 static int record_new_ref_if_needed(int num, u64 dir, int index,
4702 struct fs_path *name, void *ctx)
4705 struct send_ctx *sctx = ctx;
4706 struct rb_node *node = NULL;
4707 struct recorded_ref data;
4708 struct recorded_ref *ref;
4711 ret = get_inode_gen(sctx->send_root, dir, &dir_gen);
4716 data.dir_gen = dir_gen;
4717 set_ref_path(&data, name);
4718 node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp);
4720 ref = rb_entry(node, struct recorded_ref, node);
4721 recorded_ref_free(ref);
4723 ret = record_ref_in_tree(&sctx->rbtree_new_refs,
4724 &sctx->new_refs, name, dir, dir_gen,
4731 static int record_deleted_ref_if_needed(int num, u64 dir, int index,
4732 struct fs_path *name, void *ctx)
4735 struct send_ctx *sctx = ctx;
4736 struct rb_node *node = NULL;
4737 struct recorded_ref data;
4738 struct recorded_ref *ref;
4741 ret = get_inode_gen(sctx->parent_root, dir, &dir_gen);
4746 data.dir_gen = dir_gen;
4747 set_ref_path(&data, name);
4748 node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp);
4750 ref = rb_entry(node, struct recorded_ref, node);
4751 recorded_ref_free(ref);
4753 ret = record_ref_in_tree(&sctx->rbtree_deleted_refs,
4754 &sctx->deleted_refs, name, dir,
4761 static int record_new_ref(struct send_ctx *sctx)
4765 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4766 sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4775 static int record_deleted_ref(struct send_ctx *sctx)
4779 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4780 sctx->cmp_key, 0, record_deleted_ref_if_needed,
4790 static int record_changed_ref(struct send_ctx *sctx)
4794 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4795 sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4798 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4799 sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx);
4809 * Record and process all refs at once. Needed when an inode changes its
4810 * generation number, which means that it was deleted and recreated.
4812 static int process_all_refs(struct send_ctx *sctx,
4813 enum btrfs_compare_tree_result cmd)
4817 struct btrfs_root *root;
4818 struct btrfs_path *path;
4819 struct btrfs_key key;
4820 struct btrfs_key found_key;
4821 iterate_inode_ref_t cb;
4822 int pending_move = 0;
4824 path = alloc_path_for_send();
4828 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4829 root = sctx->send_root;
4830 cb = record_new_ref_if_needed;
4831 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4832 root = sctx->parent_root;
4833 cb = record_deleted_ref_if_needed;
4835 btrfs_err(sctx->send_root->fs_info,
4836 "Wrong command %d in process_all_refs", cmd);
4841 key.objectid = sctx->cmp_key->objectid;
4842 key.type = BTRFS_INODE_REF_KEY;
4844 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
4845 if (found_key.objectid != key.objectid ||
4846 (found_key.type != BTRFS_INODE_REF_KEY &&
4847 found_key.type != BTRFS_INODE_EXTREF_KEY))
4850 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4854 /* Catch error found during iteration */
4859 btrfs_release_path(path);
4862 * We don't actually care about pending_move as we are simply
4863 * re-creating this inode and will be renaming it into place once we
4864 * rename the parent directory.
4866 ret = process_recorded_refs(sctx, &pending_move);
4868 btrfs_free_path(path);
4872 static int send_set_xattr(struct send_ctx *sctx,
4873 struct fs_path *path,
4874 const char *name, int name_len,
4875 const char *data, int data_len)
4879 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4883 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4884 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4885 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4887 ret = send_cmd(sctx);
4894 static int send_remove_xattr(struct send_ctx *sctx,
4895 struct fs_path *path,
4896 const char *name, int name_len)
4900 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4904 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4905 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4907 ret = send_cmd(sctx);
4914 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4915 const char *name, int name_len, const char *data,
4916 int data_len, void *ctx)
4919 struct send_ctx *sctx = ctx;
4921 struct posix_acl_xattr_header dummy_acl;
4923 /* Capabilities are emitted by finish_inode_if_needed */
4924 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4927 p = fs_path_alloc();
4932 * This hack is needed because empty acls are stored as zero-byte
4933 * data in xattrs. The problem is that receiving these zero-byte
4934 * acls will fail later. To fix this, we send a dummy acl list that
4935 * only contains the version number and no entries.
4937 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4938 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4939 if (data_len == 0) {
4940 dummy_acl.a_version =
4941 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4942 data = (char *)&dummy_acl;
4943 data_len = sizeof(dummy_acl);
4947 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4951 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
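/*
 * The dummy ACL above relies on the layout of struct
 * posix_acl_xattr_header, which consists of a single __le32 a_version
 * field. The replacement payload is therefore 4 bytes: the ACL version
 * number followed by zero entries, which receivers accept where a
 * zero-length value would be rejected.
 */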
4958 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4959 const char *name, int name_len,
4960 const char *data, int data_len, void *ctx)
4963 struct send_ctx *sctx = ctx;
4966 p = fs_path_alloc();
4970 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4974 ret = send_remove_xattr(sctx, p, name, name_len);
4981 static int process_new_xattr(struct send_ctx *sctx)
4985 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4986 __process_new_xattr, sctx);
4991 static int process_deleted_xattr(struct send_ctx *sctx)
4993 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4994 __process_deleted_xattr, sctx);
4997 struct find_xattr_ctx {
5005 static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
5006 int name_len, const char *data, int data_len, void *vctx)
5008 struct find_xattr_ctx *ctx = vctx;
5010 if (name_len == ctx->name_len &&
5011 strncmp(name, ctx->name, name_len) == 0) {
5012 ctx->found_idx = num;
5013 ctx->found_data_len = data_len;
5014 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
5015 if (!ctx->found_data)
5022 static int find_xattr(struct btrfs_root *root,
5023 struct btrfs_path *path,
5024 struct btrfs_key *key,
5025 const char *name, int name_len,
5026 char **data, int *data_len)
5029 struct find_xattr_ctx ctx;
5032 ctx.name_len = name_len;
5034 ctx.found_data = NULL;
5035 ctx.found_data_len = 0;
5037 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
5041 if (ctx.found_idx == -1)
5044 *data = ctx.found_data;
5045 *data_len = ctx.found_data_len;
5047 kfree(ctx.found_data);
5049 return ctx.found_idx;
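/*
 * Callers may pass a NULL 'data' pointer to merely test for existence, in
 * which case the kmemdup()'ed copy is freed right away (the branch above)
 * and only the index is returned - __process_changed_deleted_xattr() uses
 * it that way, while __process_changed_new_xattr() also takes the value
 * in order to compare contents.
 */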
5053 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
5054 const char *name, int name_len,
5055 const char *data, int data_len,
5059 struct send_ctx *sctx = ctx;
5060 char *found_data = NULL;
5061 int found_data_len = 0;
5063 ret = find_xattr(sctx->parent_root, sctx->right_path,
5064 sctx->cmp_key, name, name_len, &found_data,
5066 if (ret == -ENOENT) {
5067 ret = __process_new_xattr(num, di_key, name, name_len, data,
5069 } else if (ret >= 0) {
5070 if (data_len != found_data_len ||
5071 memcmp(data, found_data, data_len)) {
5072 ret = __process_new_xattr(num, di_key, name, name_len,
5073 data, data_len, ctx);
5083 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
5084 const char *name, int name_len,
5085 const char *data, int data_len,
5089 struct send_ctx *sctx = ctx;
5091 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
5092 name, name_len, NULL, NULL);
5094 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
5102 static int process_changed_xattr(struct send_ctx *sctx)
5106 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
5107 __process_changed_new_xattr, sctx);
5110 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
5111 __process_changed_deleted_xattr, sctx);
5117 static int process_all_new_xattrs(struct send_ctx *sctx)
5121 struct btrfs_root *root;
5122 struct btrfs_path *path;
5123 struct btrfs_key key;
5124 struct btrfs_key found_key;
5126 path = alloc_path_for_send();
5130 root = sctx->send_root;
5132 key.objectid = sctx->cmp_key->objectid;
5133 key.type = BTRFS_XATTR_ITEM_KEY;
5135 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
5136 if (found_key.objectid != key.objectid ||
5137 found_key.type != key.type) {
5142 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
5146 /* Catch error found during iteration */
5150 btrfs_free_path(path);
5154 static int send_verity(struct send_ctx *sctx, struct fs_path *path,
5155 struct fsverity_descriptor *desc)
5159 ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY);
5163 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
5164 TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM,
5165 le8_to_cpu(desc->hash_algorithm));
5166 TLV_PUT_U32(sctx, BTRFS_SEND_A_VERITY_BLOCK_SIZE,
5167 1U << le8_to_cpu(desc->log_blocksize));
5168 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SALT_DATA, desc->salt,
5169 le8_to_cpu(desc->salt_size));
5170 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SIG_DATA, desc->signature,
5171 le32_to_cpu(desc->sig_size));
5173 ret = send_cmd(sctx);
5180 static int process_verity(struct send_ctx *sctx)
5183 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5184 struct inode *inode;
5187 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, sctx->send_root);
5189 return PTR_ERR(inode);
5191 ret = btrfs_get_verity_descriptor(inode, NULL, 0);
5195 if (ret > FS_VERITY_MAX_DESCRIPTOR_SIZE) {
5199 if (!sctx->verity_descriptor) {
5200 sctx->verity_descriptor = kvmalloc(FS_VERITY_MAX_DESCRIPTOR_SIZE,
5202 if (!sctx->verity_descriptor) {
5208 ret = btrfs_get_verity_descriptor(inode, sctx->verity_descriptor, ret);
5212 p = fs_path_alloc();
5217 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5221 ret = send_verity(sctx, p, sctx->verity_descriptor);
5232 static inline u64 max_send_read_size(const struct send_ctx *sctx)
5234 return sctx->send_max_size - SZ_16K;
5237 static int put_data_header(struct send_ctx *sctx, u32 len)
5239 if (WARN_ON_ONCE(sctx->put_data))
5241 sctx->put_data = true;
5242 if (sctx->proto >= 2) {
5244 * Since v2, the data attribute header doesn't include a length,
5245 * its length is implicit: the data extends to the end of the command.
5247 if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len)
5249 put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size);
5250 sctx->send_size += sizeof(__le16);
5252 struct btrfs_tlv_header *hdr;
5254 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
5256 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
5257 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
5258 put_unaligned_le16(len, &hdr->tlv_len);
5259 sctx->send_size += sizeof(*hdr);
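/*
 * Wire layout sketch for the two cases above (all fields little-endian):
 *
 *   v1:  [__le16 tlv_type = BTRFS_SEND_A_DATA][__le16 tlv_len][len bytes]
 *   v2:  [__le16 tlv_type = BTRFS_SEND_A_DATA][bytes until end of command]
 *
 * Dropping the length in v2 allows a data payload larger than the 64 KiB
 * that a 16-bit tlv_len could describe, e.g. for encoded writes.
 */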
5264 static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
5266 struct btrfs_root *root = sctx->send_root;
5267 struct btrfs_fs_info *fs_info = root->fs_info;
5269 pgoff_t index = offset >> PAGE_SHIFT;
5271 unsigned pg_offset = offset_in_page(offset);
5274 ret = put_data_header(sctx, len);
5278 last_index = (offset + len - 1) >> PAGE_SHIFT;
5280 while (index <= last_index) {
5281 unsigned cur_len = min_t(unsigned, len,
5282 PAGE_SIZE - pg_offset);
5284 page = find_lock_page(sctx->cur_inode->i_mapping, index);
5286 page_cache_sync_readahead(sctx->cur_inode->i_mapping,
5287 &sctx->ra, NULL, index,
5288 last_index + 1 - index);
5290 page = find_or_create_page(sctx->cur_inode->i_mapping,
5298 if (PageReadahead(page))
5299 page_cache_async_readahead(sctx->cur_inode->i_mapping,
5300 &sctx->ra, NULL, page_folio(page),
5301 index, last_index + 1 - index);
5303 if (!PageUptodate(page)) {
5304 btrfs_read_folio(NULL, page_folio(page));
5306 if (!PageUptodate(page)) {
5309 "send: IO error at offset %llu for inode %llu root %llu",
5310 page_offset(page), sctx->cur_ino,
5311 sctx->send_root->root_key.objectid);
5318 memcpy_from_page(sctx->send_buf + sctx->send_size, page,
5319 pg_offset, cur_len);
5325 sctx->send_size += cur_len;
5332 * Read some bytes from the current inode/file and send a write command to
5335 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5337 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5341 p = fs_path_alloc();
5345 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5347 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5351 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5355 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5356 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5357 ret = put_file_data(sctx, offset, len);
5361 ret = send_cmd(sctx);
5370 * Send a clone command to user space.
5372 static int send_clone(struct send_ctx *sctx,
5373 u64 offset, u32 len,
5374 struct clone_root *clone_root)
5380 btrfs_debug(sctx->send_root->fs_info,
5381 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5382 offset, len, clone_root->root->root_key.objectid,
5383 clone_root->ino, clone_root->offset);
5385 p = fs_path_alloc();
5389 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5393 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5397 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5398 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5399 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5401 if (clone_root->root == sctx->send_root) {
5402 ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen);
5405 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5407 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5413 * If the parent we're using has a received_uuid set then use that as
5414 * our clone source as that is what we will look for when doing a
5417 * This covers the case that we create a snapshot off of a received
5418 * subvolume and then use that as the parent and try to receive on a
5421 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5422 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5423 clone_root->root->root_item.received_uuid);
5425 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5426 clone_root->root->root_item.uuid);
5427 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5428 btrfs_root_ctransid(&clone_root->root->root_item));
5429 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5430 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5431 clone_root->offset);
5433 ret = send_cmd(sctx);
5442 * Send an update extent command to user space.
5444 static int send_update_extent(struct send_ctx *sctx,
5445 u64 offset, u32 len)
5450 p = fs_path_alloc();
5454 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5458 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5462 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5463 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5464 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5466 ret = send_cmd(sctx);
5474 static int send_hole(struct send_ctx *sctx, u64 end)
5476 struct fs_path *p = NULL;
5477 u64 read_size = max_send_read_size(sctx);
5478 u64 offset = sctx->cur_inode_last_extent;
5482 * A hole that starts at EOF or beyond it. Since we do not yet support
5483 * fallocate (for extent preallocation and hole punching), sending a
5484 * write of zeroes starting at EOF or beyond would later require issuing
5485 * a truncate operation which would undo the write and achieve nothing.
	if (offset >= sctx->cur_inode_size)
		return 0;
	 * Don't go beyond the inode's i_size due to prealloc extents that start
	 * after it.
5494 end = min_t(u64, end, sctx->cur_inode_size);
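	/*
	 * BTRFS_SEND_FLAG_NO_FILE_DATA (used by `btrfs send --no-data`)
	 * requests a metadata-only stream, so emit an UPDATE_EXTENT command
	 * instead of writing zeroes.
	 */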
5496 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5497 return send_update_extent(sctx, offset, end - offset);
5499 p = fs_path_alloc();
5502 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
5505 while (offset < end) {
5506 u64 len = min(end - offset, read_size);
5508 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5511 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5512 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5513 ret = put_data_header(sctx, len);
5516 memset(sctx->send_buf + sctx->send_size, 0, len);
5517 sctx->send_size += len;
5518 ret = send_cmd(sctx);
5523 sctx->cur_inode_next_write_offset = offset;
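/*
 * Illustrative numbers for the zero-filling loop above, assuming the
 * protocol v1 64K send buffer: max_send_read_size() leaves about 48K of
 * payload per command, so a 1M hole is sent as roughly 22 zero-filled
 * WRITE commands rather than one oversized command.
 */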
5529 static int send_encoded_inline_extent(struct send_ctx *sctx,
5530 struct btrfs_path *path, u64 offset,
5533 struct btrfs_root *root = sctx->send_root;
5534 struct btrfs_fs_info *fs_info = root->fs_info;
5535 struct inode *inode;
5536 struct fs_path *fspath;
5537 struct extent_buffer *leaf = path->nodes[0];
5538 struct btrfs_key key;
5539 struct btrfs_file_extent_item *ei;
5544 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
5548 fspath = fs_path_alloc();
5554 ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
5558 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5562 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5563 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5564 ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei);
5565 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
5567 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
5568 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5569 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
5570 min(key.offset + ram_bytes - offset, len));
5571 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes);
5572 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset);
5573 ret = btrfs_encoded_io_compression_from_extent(fs_info,
5574 btrfs_file_extent_compression(leaf, ei));
5577 TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
5579 ret = put_data_header(sctx, inline_size);
5582 read_extent_buffer(leaf, sctx->send_buf + sctx->send_size,
5583 btrfs_file_extent_inline_start(ei), inline_size);
5584 sctx->send_size += inline_size;
5586 ret = send_cmd(sctx);
5590 fs_path_free(fspath);
5595 static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
5596 u64 offset, u64 len)
5598 struct btrfs_root *root = sctx->send_root;
5599 struct btrfs_fs_info *fs_info = root->fs_info;
5600 struct inode *inode;
5601 struct fs_path *fspath;
5602 struct extent_buffer *leaf = path->nodes[0];
5603 struct btrfs_key key;
5604 struct btrfs_file_extent_item *ei;
5605 u64 disk_bytenr, disk_num_bytes;
5607 struct btrfs_cmd_header *hdr;
5611 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
5615 fspath = fs_path_alloc();
5621 ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
5625 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5629 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5630 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5631 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
5632 disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei);
5634 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
5635 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5636 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
5637 min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset,
5639 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN,
5640 btrfs_file_extent_ram_bytes(leaf, ei));
5641 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET,
5642 offset - key.offset + btrfs_file_extent_offset(leaf, ei));
5643 ret = btrfs_encoded_io_compression_from_extent(fs_info,
5644 btrfs_file_extent_compression(leaf, ei));
5647 TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
5648 TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0);
5650 ret = put_data_header(sctx, disk_num_bytes);
5655 * We want to do I/O directly into the send buffer, so get the next page
5656 * boundary in the send buffer. This means that there may be a gap
5657 * between the beginning of the command and the file data.
5659 data_offset = PAGE_ALIGN(sctx->send_size);
5660 if (data_offset > sctx->send_max_size ||
	    sctx->send_max_size - data_offset < disk_num_bytes) {
		ret = -EOVERFLOW;
		goto out;
	}
5667 * Note that send_buf is a mapping of send_buf_pages, so this is really
5668 * reading into send_buf.
5670 ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
5671 disk_bytenr, disk_num_bytes,
5672 sctx->send_buf_pages +
5673 (data_offset >> PAGE_SHIFT));
5677 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
5678 hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
5680 crc = crc32c(0, sctx->send_buf, sctx->send_size);
5681 crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
5682 hdr->crc = cpu_to_le32(crc);
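	/*
	 * The command (header plus TLVs) and the encoded data are written as
	 * two separate ranges: the alignment gap between send_size and
	 * data_offset is neither checksummed above nor written below.
	 */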
	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);
	if (ret < 0)
		goto out;
5687 ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset,
5688 disk_num_bytes, &sctx->send_off);
5690 sctx->send_size = 0;
5691 sctx->put_data = false;
5695 fs_path_free(fspath);
5700 static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
5701 const u64 offset, const u64 len)
5703 const u64 end = offset + len;
5704 struct extent_buffer *leaf = path->nodes[0];
5705 struct btrfs_file_extent_item *ei;
5706 u64 read_size = max_send_read_size(sctx);
5709 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5710 return send_update_extent(sctx, offset, len);
5712 ei = btrfs_item_ptr(leaf, path->slots[0],
5713 struct btrfs_file_extent_item);
5714 if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
5715 btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
5716 bool is_inline = (btrfs_file_extent_type(leaf, ei) ==
5717 BTRFS_FILE_EXTENT_INLINE);
5720 * Send the compressed extent unless the compressed data is
5721 * larger than the decompressed data. This can happen if we're
5722 * not sending the entire extent, either because it has been
5723 * partially overwritten/truncated or because this is a part of
5724 * the extent that we couldn't clone in clone_range().
		if (is_inline &&
		    btrfs_file_extent_inline_item_len(leaf,
						      path->slots[0]) <= len) {
			return send_encoded_inline_extent(sctx, path, offset,
							  len);
		} else if (!is_inline &&
			   btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) {
			return send_encoded_extent(sctx, path, offset, len);
		}
	}
5737 if (sctx->cur_inode == NULL) {
5738 struct btrfs_root *root = sctx->send_root;
5740 sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root);
5741 if (IS_ERR(sctx->cur_inode)) {
5742 int err = PTR_ERR(sctx->cur_inode);
			sctx->cur_inode = NULL;
			return err;
		}
5747 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
5748 file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping);
5751 * It's very likely there are no pages from this inode in the page
5752 * cache, so after reading extents and sending their data, we clean
5753 * the page cache to avoid trashing the page cache (adding pressure
5754 * to the page cache and forcing eviction of other data more useful
5755 * for applications).
5757 * We decide if we should clean the page cache simply by checking
5758 * if the inode's mapping nrpages is 0 when we first open it, and
5759 * not by using something like filemap_range_has_page() before
5760 * reading an extent because when we ask the readahead code to
5761 * read a given file range, it may (and almost always does) read
5762 * pages from beyond that range (see the documentation for
5763 * page_cache_sync_readahead()), so it would not be reliable,
5764 * because after reading the first extent future calls to
5765 * filemap_range_has_page() would return true because the readahead
5766 * on the previous extent resulted in reading pages of the current
5769 sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0);
5770 sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE);
5773 while (sent < len) {
5774 u64 size = min(len - sent, read_size);
5777 ret = send_write(sctx, offset + sent, size);
5783 if (sctx->clean_page_cache && PAGE_ALIGNED(end)) {
5785 * Always operate only on ranges that are a multiple of the page
5786 * size. This is not only to prevent zeroing parts of a page in
5787 * the case of subpage sector size, but also to guarantee we evict
5788 * pages, as passing a range that is smaller than page size does
5789 * not evict the respective page (only zeroes part of its content).
5791 * Always start from the end offset of the last range cleared.
5792 * This is because the readahead code may (and very often does)
		 * read pages beyond the range we request for readahead. So if
5794 * we have an extent layout like this:
5796 * [ extent A ] [ extent B ] [ extent C ]
5798 * When we ask page_cache_sync_readahead() to read extent A, it
5799 * may also trigger reads for pages of extent B. If we are doing
5800 * an incremental send and extent B has not changed between the
5801 * parent and send snapshots, some or all of its pages may end
5802 * up being read and placed in the page cache. So when truncating
5803 * the page cache we always start from the end offset of the
		 * previously processed extent up to the end of the current
		 * extent.
		truncate_inode_pages_range(&sctx->cur_inode->i_data,
					   sctx->page_cache_clear_start,
					   end - 1);
5810 sctx->page_cache_clear_start = end;
5817 * Search for a capability xattr related to sctx->cur_ino. If the capability is
 * found, call the send_set_xattr() function to emit it.
5820 * Return 0 if there isn't a capability, or when the capability was emitted
5821 * successfully, or < 0 if an error occurred.
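 *
 * (XATTR_NAME_CAPS is "security.capability"; replicating it keeps file
 * capabilities, e.g. cap_net_raw on a binary, working after receive.)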
5823 static int send_capabilities(struct send_ctx *sctx)
5825 struct fs_path *fspath = NULL;
5826 struct btrfs_path *path;
5827 struct btrfs_dir_item *di;
5828 struct extent_buffer *leaf;
5829 unsigned long data_ptr;
5834 path = alloc_path_for_send();
5838 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5839 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
	if (!di) {
		/* There is no xattr for this inode */
		goto out;
	} else if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
5848 leaf = path->nodes[0];
5849 buf_len = btrfs_dir_data_len(leaf, di);
5851 fspath = fs_path_alloc();
5852 buf = kmalloc(buf_len, GFP_KERNEL);
	if (!fspath || !buf) {
		ret = -ENOMEM;
		goto out;
	}
5858 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5862 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5863 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5865 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5866 strlen(XATTR_NAME_CAPS), buf, buf_len);
5869 fs_path_free(fspath);
5870 btrfs_free_path(path);
5874 static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
5875 struct clone_root *clone_root, const u64 disk_byte,
5876 u64 data_offset, u64 offset, u64 len)
5878 struct btrfs_path *path;
5879 struct btrfs_key key;
5881 struct btrfs_inode_info info;
5882 u64 clone_src_i_size = 0;
5885 * Prevent cloning from a zero offset with a length matching the sector
5886 * size because in some scenarios this will make the receiver fail.
5888 * For example, if in the source filesystem the extent at offset 0
5889 * has a length of sectorsize and it was written using direct IO, then
5890 * it can never be an inline extent (even if compression is enabled).
5891 * Then this extent can be cloned in the original filesystem to a non
5892 * zero file offset, but it may not be possible to clone in the
5893 * destination filesystem because it can be inlined due to compression
5894 * on the destination filesystem (as the receiver's write operations are
5895 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * one does.
5899 if (clone_root->offset == 0 &&
5900 len == sctx->send_root->fs_info->sectorsize)
5901 return send_extent_data(sctx, dst_path, offset, len);
5903 path = alloc_path_for_send();
	 * There are inodes that have extents that lie beyond their i_size.
	 * Don't accept clones from these extents.
5911 ret = get_inode_info(clone_root->root, clone_root->ino, &info);
5912 btrfs_release_path(path);
5915 clone_src_i_size = info.size;
5918 * We can't send a clone operation for the entire range if we find
5919 * extent items in the respective range in the source file that
5920 * refer to different extents or if we find holes.
5921 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
5926 * mkfs.btrfs -f /dev/sda
5927 * mount /dev/sda /mnt
5928 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5929 * cp --reflink=always /mnt/foo /mnt/bar
5930 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5931 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 * If, when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo), we blindly send a clone
	 * operation for the [0, 100K[ range from foo to bar, the receiver
	 * ends up with a file bar that matches the content of file foo - iow,
	 * it doesn't match the content of bar in the original filesystem.
5939 key.objectid = clone_root->ino;
5940 key.type = BTRFS_EXTENT_DATA_KEY;
5941 key.offset = clone_root->offset;
5942 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5945 if (ret > 0 && path->slots[0] > 0) {
5946 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5947 if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}
5953 struct extent_buffer *leaf = path->nodes[0];
5954 int slot = path->slots[0];
5955 struct btrfs_file_extent_item *ei;
5959 u64 clone_data_offset;
5960 bool crossed_src_i_size = false;
5962 if (slot >= btrfs_header_nritems(leaf)) {
5963 ret = btrfs_next_leaf(clone_root->root, path);
5971 btrfs_item_key_to_cpu(leaf, &key, slot);
5974 * We might have an implicit trailing hole (NO_HOLES feature
5975 * enabled). We deal with it after leaving this loop.
5977 if (key.objectid != clone_root->ino ||
5978 key.type != BTRFS_EXTENT_DATA_KEY)
5981 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5982 type = btrfs_file_extent_type(leaf, ei);
5983 if (type == BTRFS_FILE_EXTENT_INLINE) {
5984 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5985 ext_len = PAGE_ALIGN(ext_len);
5987 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		if (key.offset + ext_len <= clone_root->offset)
			goto next;
5993 if (key.offset > clone_root->offset) {
5994 /* Implicit hole, NO_HOLES feature enabled. */
5995 u64 hole_len = key.offset - clone_root->offset;
			ret = send_extent_data(sctx, dst_path, offset,
					       hole_len);
6008 clone_root->offset += hole_len;
6009 data_offset += hole_len;
		if (key.offset >= clone_root->offset + len)
			break;
		if (key.offset >= clone_src_i_size)
			break;
6018 if (key.offset + ext_len > clone_src_i_size) {
6019 ext_len = clone_src_i_size - key.offset;
6020 crossed_src_i_size = true;
6023 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
6024 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
6025 clone_root->offset = key.offset;
6026 if (clone_data_offset < data_offset &&
6027 clone_data_offset + ext_len > data_offset) {
6030 extent_offset = data_offset - clone_data_offset;
6031 ext_len -= extent_offset;
6032 clone_data_offset += extent_offset;
6033 clone_root->offset += extent_offset;
6037 clone_len = min_t(u64, ext_len, len);
6039 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
6040 clone_data_offset == data_offset) {
6041 const u64 src_end = clone_root->offset + clone_len;
6042 const u64 sectorsize = SZ_64K;
6045 * We can't clone the last block, when its size is not
6046 * sector size aligned, into the middle of a file. If we
6047 * do so, the receiver will get a failure (-EINVAL) when
6048 * trying to clone or will silently corrupt the data in
6049 * the destination file if it's on a kernel without the
6050 * fix introduced by commit ac765f83f1397646
6051 * ("Btrfs: fix data corruption due to cloning of eof
6054 * So issue a clone of the aligned down range plus a
6055 * regular write for the eof block, if we hit that case.
6057 * Also, we use the maximum possible sector size, 64K,
6058 * because we don't know what's the sector size of the
6059 * filesystem that receives the stream, so we have to
6060 * assume the largest possible sector size.
6062 if (src_end == clone_src_i_size &&
6063 !IS_ALIGNED(src_end, sectorsize) &&
6064 offset + clone_len < sctx->cur_inode_size) {
				slen = ALIGN_DOWN(src_end - clone_root->offset,
						  sectorsize);
6070 ret = send_clone(sctx, offset, slen,
6075 ret = send_extent_data(sctx, dst_path,
6079 ret = send_clone(sctx, offset, clone_len,
6082 } else if (crossed_src_i_size && clone_len < len) {
6084 * If we are at i_size of the clone source inode and we
6085 * can not clone from it, terminate the loop. This is
6086 * to avoid sending two write operations, one with a
6087 * length matching clone_len and the final one after
6088 * this loop with a length of len - clone_len.
6090 * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
6091 * was passed to the send ioctl), this helps avoid
6092 * sending an encoded write for an offset that is not
6093 * sector size aligned, in case the i_size of the source
6094 * inode is not sector size aligned. That will make the
6095 * receiver fallback to decompression of the data and
6096 * writing it using regular buffered IO, therefore while
			 * not incorrect, it's not optimal due to decompression and
6098 * possible re-compression at the receiver.
			ret = send_extent_data(sctx, dst_path, offset,
					       len);
6112 offset += clone_len;
6113 clone_root->offset += clone_len;
6116 * If we are cloning from the file we are currently processing,
6117 * and using the send root as the clone root, we must stop once
6118 * the current clone offset reaches the current eof of the file
6119 * at the receiver, otherwise we would issue an invalid clone
6120 * operation (source range going beyond eof) and cause the
6121 * receiver to fail. So if we reach the current eof, bail out
6122 * and fallback to a regular write.
6124 if (clone_root->root == sctx->send_root &&
6125 clone_root->ino == sctx->cur_ino &&
		    clone_root->offset >= sctx->cur_inode_next_write_offset)
			break;
6129 data_offset += clone_len;
6135 ret = send_extent_data(sctx, dst_path, offset, len);
6139 btrfs_free_path(path);
6143 static int send_write_or_clone(struct send_ctx *sctx,
6144 struct btrfs_path *path,
6145 struct btrfs_key *key,
6146 struct clone_root *clone_root)
6149 u64 offset = key->offset;
6151 u64 bs = sctx->send_root->fs_info->sectorsize;
6153 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
6157 if (clone_root && IS_ALIGNED(end, bs)) {
6158 struct btrfs_file_extent_item *ei;
6162 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6163 struct btrfs_file_extent_item);
6164 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
6165 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
6166 ret = clone_range(sctx, path, clone_root, disk_byte,
6167 data_offset, offset, end - offset);
6169 ret = send_extent_data(sctx, path, offset, end - offset);
6171 sctx->cur_inode_next_write_offset = end;
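/*
 * Note on the IS_ALIGNED() check above: cloning is only attempted when the
 * end of the range is sector size aligned; an unaligned tail (typically the
 * block at EOF) falls back to regular writes, since a clone with an
 * unaligned end that is not at EOF is rejected with -EINVAL on the
 * receiving side.
 */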
6175 static int is_extent_unchanged(struct send_ctx *sctx,
6176 struct btrfs_path *left_path,
6177 struct btrfs_key *ekey)
6180 struct btrfs_key key;
6181 struct btrfs_path *path = NULL;
6182 struct extent_buffer *eb;
6184 struct btrfs_key found_key;
6185 struct btrfs_file_extent_item *ei;
6190 u64 left_offset_fixed;
6198 path = alloc_path_for_send();
6202 eb = left_path->nodes[0];
6203 slot = left_path->slots[0];
6204 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
6205 left_type = btrfs_file_extent_type(eb, ei);
	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
6211 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
6212 left_len = btrfs_file_extent_num_bytes(eb, ei);
6213 left_offset = btrfs_file_extent_offset(eb, ei);
6214 left_gen = btrfs_file_extent_generation(eb, ei);
	 * Following comments will refer to these graphics. L is the left
	 * extent which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |--- L ---|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |--- L ---|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |--- L ---|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |--- L ---|
	 * |-8-|
	 * Nothing follows after 8.
6237 key.objectid = ekey->objectid;
6238 key.type = BTRFS_EXTENT_DATA_KEY;
6239 key.offset = ekey->offset;
6240 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
6249 * Handle special case where the right side has no extents at all.
6251 eb = path->nodes[0];
6252 slot = path->slots[0];
6253 btrfs_item_key_to_cpu(eb, &found_key, slot);
6254 if (found_key.objectid != key.objectid ||
6255 found_key.type != key.type) {
6256 /* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}
6262 * We're now on 2a, 2b or 7.
6265 while (key.offset < ekey->offset + left_len) {
6266 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
6267 right_type = btrfs_file_extent_type(eb, ei);
6268 if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}
6274 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
6275 right_len = btrfs_file_extent_ram_bytes(eb, ei);
6276 right_len = PAGE_ALIGN(right_len);
6278 right_len = btrfs_file_extent_num_bytes(eb, ei);
6282 * Are we at extent 8? If yes, we know the extent is changed.
6283 * This may only happen on the first iteration.
6285 if (found_key.offset + right_len <= ekey->offset) {
6286 /* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}
		 * We just wanted to see, when we have an inline extent, whether
		 * what follows it is a regular extent (wanted to check the above
6294 * condition for inline extents too). This should normally not
6295 * happen but it's possible for example when we have an inline
6296 * compressed extent representing data with a size matching
6297 * the page size (currently the same as sector size).
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}
6304 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
6305 right_offset = btrfs_file_extent_offset(eb, ei);
6306 right_gen = btrfs_file_extent_generation(eb, ei);
6308 left_offset_fixed = left_offset;
6309 if (key.offset < ekey->offset) {
6310 /* Fix the right offset for 2a and 7. */
6311 right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
6314 left_offset_fixed += key.offset - ekey->offset;
6318 * Check if we have the same extent.
6320 if (left_disknr != right_disknr ||
6321 left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}
6328 * Go to the next extent.
6330 ret = btrfs_next_item(sctx->parent_root, path);
6334 eb = path->nodes[0];
6335 slot = path->slots[0];
6336 btrfs_item_key_to_cpu(eb, &found_key, slot);
6338 if (ret || found_key.objectid != key.objectid ||
6339 found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
6351 * We're now behind the left extent (treat as unchanged) or at the end
6352 * of the right side (treat as changed).
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;
6361 btrfs_free_path(path);
6365 static int get_last_extent(struct send_ctx *sctx, u64 offset)
6367 struct btrfs_path *path;
6368 struct btrfs_root *root = sctx->send_root;
6369 struct btrfs_key key;
6372 path = alloc_path_for_send();
6376 sctx->cur_inode_last_extent = 0;
6378 key.objectid = sctx->cur_ino;
6379 key.type = BTRFS_EXTENT_DATA_KEY;
6380 key.offset = offset;
6381 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
6385 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;
6389 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6391 btrfs_free_path(path);
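/*
 * Check if the range [start, end) of our inode maps to a hole in the
 * parent snapshot: returns 1 if the entire range is a hole (no extent
 * items, or only extent items with a zero disk_bytenr), 0 if any real
 * extent overlaps it, and < 0 on error.
 */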
6395 static int range_is_hole_in_parent(struct send_ctx *sctx,
6399 struct btrfs_path *path;
6400 struct btrfs_key key;
6401 struct btrfs_root *root = sctx->parent_root;
6402 u64 search_start = start;
6405 path = alloc_path_for_send();
6409 key.objectid = sctx->cur_ino;
6410 key.type = BTRFS_EXTENT_DATA_KEY;
6411 key.offset = search_start;
6412 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;
6418 while (search_start < end) {
6419 struct extent_buffer *leaf = path->nodes[0];
6420 int slot = path->slots[0];
6421 struct btrfs_file_extent_item *fi;
6424 if (slot >= btrfs_header_nritems(leaf)) {
6425 ret = btrfs_next_leaf(root, path);
6433 btrfs_item_key_to_cpu(leaf, &key, slot);
6434 if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;
6442 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6443 extent_end = btrfs_file_extent_end(path);
		if (extent_end <= start)
			goto next;
6446 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;
			goto next;
		}
6457 btrfs_free_path(path);
6461 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
6462 struct btrfs_key *key)
	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;
6469 if (sctx->cur_inode_last_extent == (u64)-1) {
6470 ret = get_last_extent(sctx, key->offset - 1);
6475 if (path->slots[0] == 0 &&
6476 sctx->cur_inode_last_extent < key->offset) {
		 * We might have skipped entire leaves that contained only
		 * file extent items for our current inode. These leaves have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leaves.
6484 ret = get_last_extent(sctx, key->offset - 1);
6489 if (sctx->cur_inode_last_extent < key->offset) {
6490 ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
					      key->offset);
6496 ret = send_hole(sctx, key->offset);
6500 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6504 static int process_extent(struct send_ctx *sctx,
6505 struct btrfs_path *path,
6506 struct btrfs_key *key)
6508 struct clone_root *found_clone = NULL;
	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;
6514 if (sctx->parent_root && !sctx->cur_inode_new) {
6515 ret = is_extent_unchanged(sctx, path, key);
6523 struct btrfs_file_extent_item *ei;
6526 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6527 struct btrfs_file_extent_item);
6528 type = btrfs_file_extent_type(path->nodes[0], ei);
6529 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
6530 type == BTRFS_FILE_EXTENT_REG) {
6532 * The send spec does not have a prealloc command yet,
6533 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send stream.
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out_hole;
			}
6542 /* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out_hole;
			}
6550 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
6551 sctx->cur_inode_size, &found_clone);
		if (ret != -ENOENT && ret < 0)
			goto out;
6555 ret = send_write_or_clone(sctx, path, key, found_clone);
6559 ret = maybe_send_hole(sctx, path, key);
6564 static int process_all_extents(struct send_ctx *sctx)
6568 struct btrfs_root *root;
6569 struct btrfs_path *path;
6570 struct btrfs_key key;
6571 struct btrfs_key found_key;
6573 root = sctx->send_root;
6574 path = alloc_path_for_send();
6578 key.objectid = sctx->cmp_key->objectid;
6579 key.type = BTRFS_EXTENT_DATA_KEY;
6581 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
6582 if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			break;
		}
6588 ret = process_extent(sctx, path, &found_key);
	/* Catch error found during iteration */
	if (iter_ret < 0)
		ret = iter_ret;
6596 btrfs_free_path(path);
6600 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6602 int *refs_processed)
	if (sctx->cur_ino == 0)
		goto out;
6608 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;
6614 ret = process_recorded_refs(sctx, pending_move);
6618 *refs_processed = 1;
6623 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6626 struct btrfs_inode_info info;
6637 bool need_fileattr = false;
6638 int need_truncate = 1;
6639 int pending_move = 0;
6640 int refs_processed = 0;
	if (sctx->ignore_cur_inode)
		return 0;
6645 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6651 * We have processed the refs and thus need to advance send_progress.
6652 * Now, calls to get_cur_xxx will take the updated refs of the current
6653 * inode into account.
6655 * On the other hand, if our current inode is a directory and couldn't
6656 * be moved/renamed because its parent was renamed/moved too and it has
6657 * a higher inode number, we can only move/rename our current inode
6658 * after we moved/renamed its parent. Therefore in this case operate on
6659 * the old path (pre move/rename) of our current inode, and the
6660 * move/rename will be performed later.
6662 if (refs_processed && !pending_move)
6663 sctx->send_progress = sctx->cur_ino + 1;
	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;
6669 ret = get_inode_info(sctx->send_root, sctx->cur_ino, &info);
6672 left_mode = info.mode;
6673 left_uid = info.uid;
6674 left_gid = info.gid;
6675 left_fileattr = info.fileattr;
	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
6686 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, &info);
6689 old_size = info.size;
6690 right_mode = info.mode;
6691 right_uid = info.uid;
6692 right_gid = info.gid;
6693 right_fileattr = info.fileattr;
		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr)
			need_fileattr = true;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}
6707 if (S_ISREG(sctx->cur_inode_mode)) {
6708 if (need_send_hole(sctx)) {
6709 if (sctx->cur_inode_last_extent == (u64)-1 ||
6710 sctx->cur_inode_last_extent <
6711 sctx->cur_inode_size) {
6712 ret = get_last_extent(sctx, (u64)-1);
6716 if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
6717 ret = range_is_hole_in_parent(sctx,
6718 sctx->cur_inode_last_extent,
6719 sctx->cur_inode_size);
6722 } else if (ret == 0) {
6723 ret = send_hole(sctx, sctx->cur_inode_size);
			} else {
				/* Range is already a hole, skip. */
				ret = 0;
			}
6732 if (need_truncate) {
6733 ret = send_truncate(sctx, sctx->cur_ino,
6734 sctx->cur_inode_gen,
6735 sctx->cur_inode_size);
6742 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6743 left_uid, left_gid);
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_mode);
6753 if (need_fileattr) {
		ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				    left_fileattr);
6760 if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY)
6761 && sctx->cur_inode_needs_verity) {
6762 ret = process_verity(sctx);
6767 ret = send_capabilities(sctx);
6772 * If other directory inodes depended on our current directory
6773 * inode's move/rename, now do their move/rename operations.
6775 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6776 ret = apply_children_dir_moves(sctx);
6780 * Need to send that every time, no matter if it actually
6781 * changed between the two trees as we have done changes to
6782 * the inode before. If our inode is a directory and it's
6783 * waiting to be moved/renamed, we will send its utimes when
6784 * it's moved/renamed, therefore we don't need to do it here.
6786 sctx->send_progress = sctx->cur_ino + 1;
6789 * If the current inode is a non-empty directory, delay issuing
6790 * the utimes command for it, as it's very likely we have inodes
	 * with a higher number inside it. We want to issue the utimes
6792 * command only after adding all dentries to it.
6794 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_size > 0)
6795 ret = cache_dir_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
	else
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6805 ret = trim_dir_utimes_cache(sctx);
6810 static void close_current_inode(struct send_ctx *sctx)
	if (sctx->cur_inode == NULL)
		return;
6817 i_size = i_size_read(sctx->cur_inode);
6820 * If we are doing an incremental send, we may have extents between the
6821 * last processed extent and the i_size that have not been processed
6822 * because they haven't changed but we may have read some of their pages
6823 * through readahead, see the comments at send_extent_data().
6825 if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size)
6826 truncate_inode_pages_range(&sctx->cur_inode->i_data,
6827 sctx->page_cache_clear_start,
6828 round_up(i_size, PAGE_SIZE) - 1);
6830 iput(sctx->cur_inode);
6831 sctx->cur_inode = NULL;
6834 static int changed_inode(struct send_ctx *sctx,
6835 enum btrfs_compare_tree_result result)
6838 struct btrfs_key *key = sctx->cmp_key;
6839 struct btrfs_inode_item *left_ii = NULL;
6840 struct btrfs_inode_item *right_ii = NULL;
6844 close_current_inode(sctx);
6846 sctx->cur_ino = key->objectid;
6847 sctx->cur_inode_new_gen = false;
6848 sctx->cur_inode_last_extent = (u64)-1;
6849 sctx->cur_inode_next_write_offset = 0;
6850 sctx->ignore_cur_inode = false;
6853 * Set send_progress to current inode. This will tell all get_cur_xxx
6854 * functions that the current inode's refs are not updated yet. Later,
6855 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6857 sctx->send_progress = sctx->cur_ino;
6859 if (result == BTRFS_COMPARE_TREE_NEW ||
6860 result == BTRFS_COMPARE_TREE_CHANGED) {
6861 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6862 sctx->left_path->slots[0],
6863 struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
						  left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6868 sctx->right_path->slots[0],
6869 struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);
	}
6873 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6874 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6875 sctx->right_path->slots[0],
6876 struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);
6882 * The cur_ino = root dir case is special here. We can't treat
6883 * the inode as deleted+reused because it would generate a
6884 * stream that tries to delete/mkdir the root dir.
6886 if (left_gen != right_gen &&
6887 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6888 sctx->cur_inode_new_gen = true;
6892 * Normally we do not find inodes with a link count of zero (orphans)
6893 * because the most common case is to create a snapshot and use it
6894 * for a send operation. However other less common use cases involve
	 * using a subvolume and sending it after turning it to RO mode just
	 * after deleting all hard links of a file while holding an open
	 * file descriptor against it, or turning a RO snapshot into RW mode,
	 * keeping an open file descriptor against a file, deleting it and then
	 * turning the snapshot back to RO mode before using it for a send
	 * operation. The former is what the receiver operation does.
6901 * Therefore, if we want to send these snapshots soon after they're
6902 * received, we need to handle orphan inodes as well. Moreover, orphans
6903 * can appear not only in the send snapshot but also in the parent
6904 * snapshot. Here are several cases:
6906 * Case 1: BTRFS_COMPARE_TREE_NEW
6907 * | send snapshot | action
6908 * --------------------------------
6909 * nlink | 0 | ignore
6911 * Case 2: BTRFS_COMPARE_TREE_DELETED
6912 * | parent snapshot | action
6913 * ----------------------------------
6914 * nlink | 0 | as usual
	 * Note: No unlinks will be sent because there are no paths for it.
6917 * Case 3: BTRFS_COMPARE_TREE_CHANGED
6918 * | | parent snapshot | send snapshot | action
6919 * -----------------------------------------------------------------------
6920 * subcase 1 | nlink | 0 | 0 | ignore
6921 * subcase 2 | nlink | >0 | 0 | new_gen(deletion)
6922 * subcase 3 | nlink | 0 | >0 | new_gen(creation)
6925 if (result == BTRFS_COMPARE_TREE_NEW) {
6926 if (btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii) == 0) {
			sctx->ignore_cur_inode = true;
			goto out;
		}
6930 sctx->cur_inode_gen = left_gen;
6931 sctx->cur_inode_new = true;
6932 sctx->cur_inode_deleted = false;
6933 sctx->cur_inode_size = btrfs_inode_size(
6934 sctx->left_path->nodes[0], left_ii);
6935 sctx->cur_inode_mode = btrfs_inode_mode(
6936 sctx->left_path->nodes[0], left_ii);
6937 sctx->cur_inode_rdev = btrfs_inode_rdev(
6938 sctx->left_path->nodes[0], left_ii);
6939 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6940 ret = send_create_inode_if_needed(sctx);
6941 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6942 sctx->cur_inode_gen = right_gen;
6943 sctx->cur_inode_new = false;
6944 sctx->cur_inode_deleted = true;
6945 sctx->cur_inode_size = btrfs_inode_size(
6946 sctx->right_path->nodes[0], right_ii);
6947 sctx->cur_inode_mode = btrfs_inode_mode(
6948 sctx->right_path->nodes[0], right_ii);
6949 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6950 u32 new_nlinks, old_nlinks;
6952 new_nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6953 old_nlinks = btrfs_inode_nlink(sctx->right_path->nodes[0], right_ii);
6954 if (new_nlinks == 0 && old_nlinks == 0) {
			sctx->ignore_cur_inode = true;
			goto out;
6957 } else if (new_nlinks == 0 || old_nlinks == 0) {
			sctx->cur_inode_new_gen = true;
6961 * We need to do some special handling in case the inode was
6962 * reported as changed with a changed generation number. This
6963 * means that the original inode was deleted and new inode
6964 * reused the same inum. So we have to treat the old inode as
6965 * deleted and the new one as new.
6967 if (sctx->cur_inode_new_gen) {
6969 * First, process the inode as if it was deleted.
6971 if (old_nlinks > 0) {
6972 sctx->cur_inode_gen = right_gen;
6973 sctx->cur_inode_new = false;
6974 sctx->cur_inode_deleted = true;
6975 sctx->cur_inode_size = btrfs_inode_size(
6976 sctx->right_path->nodes[0], right_ii);
6977 sctx->cur_inode_mode = btrfs_inode_mode(
6978 sctx->right_path->nodes[0], right_ii);
6979 ret = process_all_refs(sctx,
6980 BTRFS_COMPARE_TREE_DELETED);
6986 * Now process the inode as if it was new.
6988 if (new_nlinks > 0) {
6989 sctx->cur_inode_gen = left_gen;
6990 sctx->cur_inode_new = true;
6991 sctx->cur_inode_deleted = false;
6992 sctx->cur_inode_size = btrfs_inode_size(
6993 sctx->left_path->nodes[0],
6995 sctx->cur_inode_mode = btrfs_inode_mode(
6996 sctx->left_path->nodes[0],
6998 sctx->cur_inode_rdev = btrfs_inode_rdev(
6999 sctx->left_path->nodes[0],
7001 ret = send_create_inode_if_needed(sctx);
7005 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
7009 * Advance send_progress now as we did not get
				 * into process_recorded_refs_if_needed in the
				 * new_gen case.
7013 sctx->send_progress = sctx->cur_ino + 1;
7016 * Now process all extents and xattrs of the
7017 * inode as if they were all new.
7019 ret = process_all_extents(sctx);
7022 ret = process_all_new_xattrs(sctx);
7027 sctx->cur_inode_gen = left_gen;
7028 sctx->cur_inode_new = false;
7029 sctx->cur_inode_new_gen = false;
7030 sctx->cur_inode_deleted = false;
7031 sctx->cur_inode_size = btrfs_inode_size(
7032 sctx->left_path->nodes[0], left_ii);
7033 sctx->cur_inode_mode = btrfs_inode_mode(
7034 sctx->left_path->nodes[0], left_ii);
7043 * We have to process new refs before deleted refs, but compare_trees gives us
7044 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
7045 * first and later process them in process_recorded_refs.
7046 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated processing of refs. The reason for this is
7048 * that in this case, compare_tree actually compares the refs of 2 different
7049 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
7050 * refs of the right tree as deleted and all refs of the left tree as new.
7052 static int changed_ref(struct send_ctx *sctx,
7053 enum btrfs_compare_tree_result result)
7057 if (sctx->cur_ino != sctx->cmp_key->objectid) {
7058 inconsistent_snapshot_error(sctx, result, "reference");
7062 if (!sctx->cur_inode_new_gen &&
7063 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
7064 if (result == BTRFS_COMPARE_TREE_NEW)
7065 ret = record_new_ref(sctx);
7066 else if (result == BTRFS_COMPARE_TREE_DELETED)
7067 ret = record_deleted_ref(sctx);
7068 else if (result == BTRFS_COMPARE_TREE_CHANGED)
7069 ret = record_changed_ref(sctx);
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of xattrs. The reason is the same as in changed_ref.
7080 static int changed_xattr(struct send_ctx *sctx,
7081 enum btrfs_compare_tree_result result)
7085 if (sctx->cur_ino != sctx->cmp_key->objectid) {
7086 inconsistent_snapshot_error(sctx, result, "xattr");
7090 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
7091 if (result == BTRFS_COMPARE_TREE_NEW)
7092 ret = process_new_xattr(sctx);
7093 else if (result == BTRFS_COMPARE_TREE_DELETED)
7094 ret = process_deleted_xattr(sctx);
7095 else if (result == BTRFS_COMPARE_TREE_CHANGED)
7096 ret = process_changed_xattr(sctx);
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of extents. The reason is the same as in changed_ref.
7107 static int changed_extent(struct send_ctx *sctx,
7108 enum btrfs_compare_tree_result result)
7113 * We have found an extent item that changed without the inode item
7114 * having changed. This can happen either after relocation (where the
7115 * disk_bytenr of an extent item is replaced at
7116 * relocation.c:replace_file_extents()) or after deduplication into a
7117 * file in both the parent and send snapshots (where an extent item can
7118 * get modified or replaced with a new one). Note that deduplication
7119 * updates the inode item, but it only changes the iversion (sequence
7120 * field in the inode item) of the inode, so if a file is deduplicated
	 * the same number of times in both the parent and send snapshots, its
	 * iversion becomes the same in both snapshots, and hence the inode
	 * item is the same in both snapshots.
	if (sctx->cur_ino != sctx->cmp_key->objectid)
		return 0;
7128 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
7129 if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					     sctx->cmp_key);
7137 static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result)
7141 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
7142 if (result == BTRFS_COMPARE_TREE_NEW)
7143 sctx->cur_inode_needs_verity = true;
7148 static int dir_changed(struct send_ctx *sctx, u64 dir)
7150 u64 orig_gen, new_gen;
7153 ret = get_inode_gen(sctx->send_root, dir, &new_gen);
7157 ret = get_inode_gen(sctx->parent_root, dir, &orig_gen);
7161 return (orig_gen != new_gen) ? 1 : 0;
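/*
 * For a ref item that compared as BTRFS_COMPARE_TREE_SAME, check whether any
 * parent directory it references was deleted and recreated (generation
 * changed) between the two snapshots. A return of 1 makes the caller treat
 * the item as changed after all.
 */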
7164 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
7165 struct btrfs_key *key)
7167 struct btrfs_inode_extref *extref;
7168 struct extent_buffer *leaf;
7169 u64 dirid = 0, last_dirid = 0;
7176 /* Easy case, just check this one dirid */
7177 if (key->type == BTRFS_INODE_REF_KEY) {
7178 dirid = key->offset;
7180 ret = dir_changed(sctx, dirid);
7184 leaf = path->nodes[0];
7185 item_size = btrfs_item_size(leaf, path->slots[0]);
7186 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
7187 while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
7190 dirid = btrfs_inode_extref_parent(leaf, extref);
7191 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
7192 cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
7195 ret = dir_changed(sctx, dirid);
7205 * Updates compare related fields in sctx and simply forwards to the actual
7206 * changed_xxx functions.
7208 static int changed_cb(struct btrfs_path *left_path,
7209 struct btrfs_path *right_path,
7210 struct btrfs_key *key,
7211 enum btrfs_compare_tree_result result,
7212 struct send_ctx *sctx)
	 * We can not hold the commit root semaphore here. This is because in
	 * the case of sending and receiving to the same filesystem, using a
	 * pipe, doing so could result in a deadlock:
7221 * 1) The task running send blocks on the pipe because it's full;
7223 * 2) The task running receive, which is the only consumer of the pipe,
7224 * is waiting for a transaction commit (for example due to a space
7225 * reservation when doing a write or triggering a transaction commit
7226 * when creating a subvolume);
7228 * 3) The transaction is waiting to write lock the commit root semaphore,
7229 * but can not acquire it since it's being held at 1).
7231 * Down this call chain we write to the pipe through kernel_write().
7232 * The same type of problem can also happen when sending to a file that
7233 * is stored in the same filesystem - when reserving space for a write
7234 * into the file, we can trigger a transaction commit.
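	 *
	 * A minimal way to hit that shape (sketch):
	 *
	 *   btrfs send /mnt/snap | btrfs receive /mnt/received
	 *
	 * with /mnt being the same filesystem on both ends of the pipe.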
7236 * Our caller has supplied us with clones of leaves from the send and
7237 * parent roots, so we're safe here from a concurrent relocation and
7238 * further reallocation of metadata extents while we are here. Below we
7239 * also assert that the leaves are clones.
7241 lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);
7244 * We always have a send root, so left_path is never NULL. We will not
7245 * have a leaf when we have reached the end of the send root but have
7246 * not yet reached the end of the parent root.
7248 if (left_path->nodes[0])
7249 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
7250 &left_path->nodes[0]->bflags));
7252 * When doing a full send we don't have a parent root, so right_path is
7253 * NULL. When doing an incremental send, we may have reached the end of
7254 * the parent root already, so we don't have a leaf at right_path.
7256 if (right_path && right_path->nodes[0])
7257 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
7258 &right_path->nodes[0]->bflags));
7260 if (result == BTRFS_COMPARE_TREE_SAME) {
7261 if (key->type == BTRFS_INODE_REF_KEY ||
7262 key->type == BTRFS_INODE_EXTREF_KEY) {
7263 ret = compare_refs(sctx, left_path, key);
7268 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
7269 return maybe_send_hole(sctx, left_path, key);
7273 result = BTRFS_COMPARE_TREE_CHANGED;
7277 sctx->left_path = left_path;
7278 sctx->right_path = right_path;
7279 sctx->cmp_key = key;
7281 ret = finish_inode_if_needed(sctx, 0);
7285 /* Ignore non-FS objects */
7286 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;
7290 if (key->type == BTRFS_INODE_ITEM_KEY) {
7291 ret = changed_inode(sctx, result);
7292 } else if (!sctx->ignore_cur_inode) {
7293 if (key->type == BTRFS_INODE_REF_KEY ||
7294 key->type == BTRFS_INODE_EXTREF_KEY)
7295 ret = changed_ref(sctx, result);
7296 else if (key->type == BTRFS_XATTR_ITEM_KEY)
7297 ret = changed_xattr(sctx, result);
7298 else if (key->type == BTRFS_EXTENT_DATA_KEY)
7299 ret = changed_extent(sctx, result);
		else if (key->type == BTRFS_VERITY_DESC_ITEM_KEY &&
			 key->offset == 0)
7302 ret = changed_verity(sctx, result);
7309 static int search_key_again(const struct send_ctx *sctx,
7310 struct btrfs_root *root,
7311 struct btrfs_path *path,
7312 const struct btrfs_key *key)
7316 if (!path->need_commit_sem)
7317 lockdep_assert_held_read(&root->fs_info->commit_root_sem);
7320 * Roots used for send operations are readonly and no one can add,
7321 * update or remove keys from them, so we should be able to find our
7322 * key again. The only exception is deduplication, which can operate on
7323 * readonly roots and add, update or remove keys to/from them - but at
7324 * the moment we don't allow it to run in parallel with send.
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		btrfs_print_tree(path->nodes[path->lowest_level], false);
7330 btrfs_err(root->fs_info,
7331 "send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
7332 key->objectid, key->type, key->offset,
7333 (root == sctx->parent_root ? "parent" : "send"),
7334 root->root_key.objectid, path->lowest_level,
7335 path->slots[path->lowest_level]);
7342 static int full_send_tree(struct send_ctx *sctx)
7345 struct btrfs_root *send_root = sctx->send_root;
7346 struct btrfs_key key;
7347 struct btrfs_fs_info *fs_info = send_root->fs_info;
7348 struct btrfs_path *path;
7350 path = alloc_path_for_send();
7353 path->reada = READA_FORWARD_ALWAYS;
7355 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
7356 key.type = BTRFS_INODE_ITEM_KEY;
7359 down_read(&fs_info->commit_root_sem);
7360 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7361 up_read(&fs_info->commit_root_sem);
7363 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
7370 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
7372 ret = changed_cb(path, NULL, &key,
7373 BTRFS_COMPARE_TREE_NEW, sctx);
7377 down_read(&fs_info->commit_root_sem);
7378 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7379 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7380 up_read(&fs_info->commit_root_sem);
7382 * A transaction used for relocating a block group was
7383 * committed or is about to finish its commit. Release
7384 * our path (leaf) and restart the search, so that we
7385 * avoid operating on any file extent items that are
7386 * stale, with a disk_bytenr that reflects a pre
			 * relocation value. This way we avoid, as much as
			 * possible, falling back to regular writes when
			 * checking if we can clone file ranges.
7391 btrfs_release_path(path);
7392 ret = search_key_again(sctx, send_root, path, &key);
7396 up_read(&fs_info->commit_root_sem);
7399 ret = btrfs_next_item(send_root, path);
7409 ret = finish_inode_if_needed(sctx, 1);
7412 btrfs_free_path(path);
7416 static int replace_node_with_clone(struct btrfs_path *path, int level)
7418 struct extent_buffer *clone;
7420 clone = btrfs_clone_extent_buffer(path->nodes[level]);
7424 free_extent_buffer(path->nodes[level]);
7425 path->nodes[level] = clone;
7430 static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
7432 struct extent_buffer *eb;
7433 struct extent_buffer *parent = path->nodes[*level];
7434 int slot = path->slots[*level];
7435 const int nritems = btrfs_header_nritems(parent);
7439 lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
7441 BUG_ON(*level == 0);
7442 eb = btrfs_read_node_slot(parent, slot);
7447 * Trigger readahead for the next leaves we will process, so that it is
7448 * very likely that when we need them they are already in memory and we
7449 * will not block on disk IO. For nodes we only do readahead for one,
7450 * since the time window between processing nodes is typically larger.
7452 reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);
7454 for (slot++; slot < nritems && reada_done < reada_max; slot++) {
7455 if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
7456 btrfs_readahead_node_child(parent, slot);
7457 reada_done += eb->fs_info->nodesize;
7461 path->nodes[*level - 1] = eb;
7462 path->slots[*level - 1] = 0;
7466 return replace_node_with_clone(path, 0);
7471 static int tree_move_next_or_upnext(struct btrfs_path *path,
7472 int *level, int root_level)
7476 nritems = btrfs_header_nritems(path->nodes[*level]);
7478 path->slots[*level]++;
7480 while (path->slots[*level] >= nritems) {
7481 if (*level == root_level) {
			path->slots[*level] = nritems - 1;
			return -1;
		}
7487 path->slots[*level] = 0;
7488 free_extent_buffer(path->nodes[*level]);
7489 path->nodes[*level] = NULL;
7491 path->slots[*level]++;
7493 nritems = btrfs_header_nritems(path->nodes[*level]);
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
7503 static int tree_advance(struct btrfs_path *path,
7504 int *level, int root_level,
7506 struct btrfs_key *key,
7511 if (*level == 0 || !allow_down) {
7512 ret = tree_move_next_or_upnext(path, level, root_level);
	} else {
		ret = tree_move_down(path, level, reada_min_gen);
	}
7518 * Even if we have reached the end of a tree, ret is -1, update the key
7519 * anyway, so that in case we need to restart due to a block group
7520 * relocation, we can assert that the last key of the root node still
7521 * exists in the tree.
	if (*level == 0)
		btrfs_item_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);
	else
		btrfs_node_key_to_cpu(path->nodes[*level], key,
				      path->slots[*level]);
7533 static int tree_compare_item(struct btrfs_path *left_path,
7534 struct btrfs_path *right_path,
7539 unsigned long off1, off2;
7541 len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;
7546 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
7547 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
7548 right_path->slots[0]);
7550 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
7559 * A transaction used for relocating a block group was committed or is about to
7560 * finish its commit. Release our paths and restart the search, so that we are
7561 * not using stale extent buffers:
7563 * 1) For levels > 0, we are only holding references of extent buffers, without
7564 * any locks on them, which does not prevent them from having been relocated
7565 * and reallocated after the last time we released the commit root semaphore.
 *    The exceptions are the root nodes, for which we always have a clone; see
7567 * the comment at btrfs_compare_trees();
7569 * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
7570 * we are safe from the concurrent relocation and reallocation. However they
7571 * can have file extent items with a pre relocation disk_bytenr value, so we
 *    restart the search from the current commit roots and clone the new leaves
 *    so that we get the post relocation disk_bytenr values. Not doing so could
7574 * make us clone the wrong data in case there are new extents using the old
7575 * disk_bytenr that happen to be shared.
7577 static int restart_after_relocation(struct btrfs_path *left_path,
7578 struct btrfs_path *right_path,
7579 const struct btrfs_key *left_key,
7580 const struct btrfs_key *right_key,
7583 const struct send_ctx *sctx)
7588 lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);
7590 btrfs_release_path(left_path);
7591 btrfs_release_path(right_path);
7594 * Since keys can not be added or removed to/from our roots because they
7595 * are readonly and we do not allow deduplication to run in parallel
	 * (which can add, remove or change keys), the layout of the trees should
	 * not change.
7599 left_path->lowest_level = left_level;
7600 ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
7604 right_path->lowest_level = right_level;
7605 ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
7610 * If the lowest level nodes are leaves, clone them so that they can be
7611 * safely used by changed_cb() while not under the protection of the
	 * commit root semaphore, even if relocation and reallocation happen in
	 * parallel.
7615 if (left_level == 0) {
7616 ret = replace_node_with_clone(left_path, 0);
7621 if (right_level == 0) {
7622 ret = replace_node_with_clone(right_path, 0);
7628 * Now clone the root nodes (unless they happen to be the leaves we have
7629 * already cloned). This is to protect against concurrent snapshotting of
7630 * the send and parent roots (see the comment at btrfs_compare_trees()).
7632 root_level = btrfs_header_level(sctx->send_root->commit_root);
7633 if (root_level > 0) {
7634 ret = replace_node_with_clone(left_path, root_level);
7639 root_level = btrfs_header_level(sctx->parent_root->commit_root);
7640 if (root_level > 0) {
7641 ret = replace_node_with_clone(right_path, root_level);
7650 * This function compares two trees and calls the provided callback for
7651 * every changed/new/deleted item it finds.
7652 * If shared tree blocks are encountered, whole subtrees are skipped, making
7653 * the compare pretty fast on snapshotted subvolumes.
7654 *
7655 * This currently works on commit roots only. As commit roots are read only,
7656 * we don't do any locking. The commit roots are protected with transactions.
7657 * Transactions are ended and rejoined when a commit is tried in between.
7658 *
7659 * This function checks for modifications done to the trees while comparing.
7660 * If it detects a change, it aborts immediately.
7661 */
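/*
 * Editorial sketch, not part of the original file: the shape of the callback
 * this function drives (for send it is changed_cb()). Every visited item is
 * reported with one of the btrfs_compare_tree_result values:
 *
 *	static int example_cb(struct btrfs_path *left_path,
 *			      struct btrfs_path *right_path,
 *			      struct btrfs_key *key,
 *			      enum btrfs_compare_tree_result result,
 *			      struct send_ctx *sctx)
 *	{
 *		switch (result) {
 *		case BTRFS_COMPARE_TREE_NEW:     // only in the send root
 *		case BTRFS_COMPARE_TREE_DELETED: // only in the parent root
 *		case BTRFS_COMPARE_TREE_CHANGED: // same key, new item data
 *		case BTRFS_COMPARE_TREE_SAME:    // identical items
 *			break;
 *		}
 *		return 0;
 *	}
 */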
7662 static int btrfs_compare_trees(struct btrfs_root *left_root,
7663 struct btrfs_root *right_root, struct send_ctx *sctx)
7664 {
7665 struct btrfs_fs_info *fs_info = left_root->fs_info;
7666 int ret;
7667 int cmp;
7668 struct btrfs_path *left_path = NULL;
7669 struct btrfs_path *right_path = NULL;
7670 struct btrfs_key left_key;
7671 struct btrfs_key right_key;
7672 char *tmp_buf = NULL;
7673 int left_root_level;
7674 int right_root_level;
7675 int left_level;
7676 int right_level;
7677 int left_end_reached = 0;
7678 int right_end_reached = 0;
7679 int advance_left = 0;
7680 int advance_right = 0;
7681 u64 left_blockptr;
7682 u64 right_blockptr;
7683 u64 left_gen;
7684 u64 right_gen;
7685 u64 reada_min_gen;
7687 left_path = btrfs_alloc_path();
7688 if (!left_path) {
7689 ret = -ENOMEM;
7690 goto out;
7691 }
7692 right_path = btrfs_alloc_path();
7693 if (!right_path) {
7694 ret = -ENOMEM;
7695 goto out;
7696 }
7698 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
7699 if (!tmp_buf) {
7700 ret = -ENOMEM;
7701 goto out;
7702 }
7704 left_path->search_commit_root = 1;
7705 left_path->skip_locking = 1;
7706 right_path->search_commit_root = 1;
7707 right_path->skip_locking = 1;
7709 /*
7710 * Strategy: Go to the first items of both trees. Then do
7711 *
7712 * If both trees are at level 0
7713 *   Compare keys of current items
7714 *     If left < right treat left item as new, advance left tree
7715 *       and repeat
7716 *     If left > right treat right item as deleted, advance right tree
7717 *       and repeat
7718 *     If left == right do deep compare of items, treat as changed if
7719 *       needed, advance both trees and repeat
7720 * If both trees are at the same level but not at level 0
7721 *   Compare keys of current nodes/leaves
7722 *     If left < right advance left tree and repeat
7723 *     If left > right advance right tree and repeat
7724 *     If left == right compare blockptrs of the next nodes/leaves
7725 *       If they match advance both trees but stay at the same level
7726 *         and repeat
7727 *       If they don't match advance both trees while allowing to go
7728 *         deeper and repeat
7729 * If tree levels are different
7730 *   Advance the tree that needs it and repeat
7731 *
7732 * Advancing a tree means:
7733 *   If we are at level 0, try to go to the next slot. If that's not
7734 *   possible, go one level up and repeat. Stop when we find a level
7735 *   where we can go to the next slot. We may at this point be on a
7736 *   node or a leaf.
7737 *
7738 *   If we are not at level 0 and not on shared tree blocks, go one
7739 *   level deeper, since non-shared blocks must be compared in full.
7740 *
7741 *   If we are not at level 0 and on shared tree blocks, go one slot to
7742 *   the right if possible or go up and right.
7743 */
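/*
 * Editorial worked example, not part of the original file: with level 0 left
 * keys {(257 INODE_ITEM 0), (257 XATTR_ITEM 5)} and right keys
 * {(257 INODE_ITEM 0), (258 INODE_ITEM 0)}, the loop below deep compares
 * (257 INODE_ITEM 0) since both sides have it, reports (257 XATTR_ITEM 5) as
 * new (it is smaller than the right key (258 INODE_ITEM 0)), and reports
 * (258 INODE_ITEM 0) as deleted once the left tree is exhausted.
 */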
7745 down_read(&fs_info->commit_root_sem);
7746 left_level = btrfs_header_level(left_root->commit_root);
7747 left_root_level = left_level;
7748 /*
7749 * We clone the root node of the send and parent roots to prevent races
7750 * with snapshot creation of these roots. Snapshot creation COWs the
7751 * root node of a tree, so after the transaction is committed the old
7752 * extent can be reallocated while this send operation is still ongoing.
7753 * So we clone them, under the commit root semaphore, to be race free.
7754 */
7755 left_path->nodes[left_level] =
7756 btrfs_clone_extent_buffer(left_root->commit_root);
7757 if (!left_path->nodes[left_level]) {
7758 ret = -ENOMEM;
7759 goto out_unlock;
7760 }
7762 right_level = btrfs_header_level(right_root->commit_root);
7763 right_root_level = right_level;
7764 right_path->nodes[right_level] =
7765 btrfs_clone_extent_buffer(right_root->commit_root);
7766 if (!right_path->nodes[right_level]) {
7767 ret = -ENOMEM;
7768 goto out_unlock;
7769 }
7770 /*
7771 * Our right root is the parent root, while the left root is the "send"
7772 * root. We know that all new nodes/leaves in the left root must have
7773 * a generation greater than the right root's generation, so we trigger
7774 * readahead for those nodes and leaves of the left root, as we know we
7775 * will need to read them at some point.
7776 */
7777 reada_min_gen = btrfs_header_generation(right_root->commit_root);
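/*
 * Editorial note, not part of the original file: tree_advance() below uses
 * reada_min_gen as the cutoff, triggering readahead only for blocks whose
 * generation is above it, i.e. blocks that cannot be shared with the parent
 * snapshot and will therefore certainly have to be read from disk.
 */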
7779 if (left_level == 0)
7780 btrfs_item_key_to_cpu(left_path->nodes[left_level],
7781 &left_key, left_path->slots[left_level]);
7782 else
7783 btrfs_node_key_to_cpu(left_path->nodes[left_level],
7784 &left_key, left_path->slots[left_level]);
7785 if (right_level == 0)
7786 btrfs_item_key_to_cpu(right_path->nodes[right_level],
7787 &right_key, right_path->slots[right_level]);
7788 else
7789 btrfs_node_key_to_cpu(right_path->nodes[right_level],
7790 &right_key, right_path->slots[right_level]);
7792 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7794 while (1) {
7795 if (need_resched() ||
7796 rwsem_is_contended(&fs_info->commit_root_sem)) {
7797 up_read(&fs_info->commit_root_sem);
7798 cond_resched();
7799 down_read(&fs_info->commit_root_sem);
7800 }
7802 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7803 ret = restart_after_relocation(left_path, right_path,
7804 &left_key, &right_key,
7805 left_level, right_level,
7806 sctx);
7807 if (ret < 0)
7808 goto out_unlock;
7809 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7810 }
7812 if (advance_left && !left_end_reached) {
7813 ret = tree_advance(left_path, &left_level,
7814 left_root_level,
7815 advance_left != ADVANCE_ONLY_NEXT,
7816 &left_key, reada_min_gen);
7817 if (ret == -1)
7818 left_end_reached = ADVANCE;
7819 else if (ret < 0)
7820 goto out_unlock;
7821 advance_left = 0;
7822 }
7823 if (advance_right && !right_end_reached) {
7824 ret = tree_advance(right_path, &right_level,
7825 right_root_level,
7826 advance_right != ADVANCE_ONLY_NEXT,
7827 &right_key, reada_min_gen);
7828 if (ret == -1)
7829 right_end_reached = ADVANCE;
7830 else if (ret < 0)
7831 goto out_unlock;
7832 advance_right = 0;
7833 }
7835 if (left_end_reached && right_end_reached) {
7836 ret = 0;
7837 goto out;
7838 } else if (left_end_reached) {
7839 if (right_level == 0) {
7840 up_read(&fs_info->commit_root_sem);
7841 ret = changed_cb(left_path, right_path,
7842 &right_key,
7843 BTRFS_COMPARE_TREE_DELETED,
7844 sctx);
7845 if (ret < 0)
7846 goto out;
7847 down_read(&fs_info->commit_root_sem);
7848 }
7849 advance_right = ADVANCE;
7850 continue;
7851 } else if (right_end_reached) {
7852 if (left_level == 0) {
7853 up_read(&fs_info->commit_root_sem);
7854 ret = changed_cb(left_path, right_path,
7855 &left_key,
7856 BTRFS_COMPARE_TREE_NEW,
7857 sctx);
7858 if (ret < 0)
7859 goto out;
7860 down_read(&fs_info->commit_root_sem);
7861 }
7862 advance_left = ADVANCE;
7863 continue;
7864 }
7866 if (left_level == 0 && right_level == 0) {
7867 up_read(&fs_info->commit_root_sem);
7868 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7869 if (cmp < 0) {
7870 ret = changed_cb(left_path, right_path,
7871 &left_key,
7872 BTRFS_COMPARE_TREE_NEW,
7873 sctx);
7874 advance_left = ADVANCE;
7875 } else if (cmp > 0) {
7876 ret = changed_cb(left_path, right_path,
7877 &right_key,
7878 BTRFS_COMPARE_TREE_DELETED,
7879 sctx);
7880 advance_right = ADVANCE;
7881 } else {
7882 enum btrfs_compare_tree_result result;
7884 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7885 ret = tree_compare_item(left_path, right_path,
7886 tmp_buf);
7887 if (ret)
7888 result = BTRFS_COMPARE_TREE_CHANGED;
7889 else
7890 result = BTRFS_COMPARE_TREE_SAME;
7891 ret = changed_cb(left_path, right_path,
7892 &left_key, result, sctx);
7893 advance_left = ADVANCE;
7894 advance_right = ADVANCE;
7895 }
7897 if (ret < 0)
7898 goto out;
7899 down_read(&fs_info->commit_root_sem);
7900 } else if (left_level == right_level) {
7901 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7902 if (cmp < 0) {
7903 advance_left = ADVANCE;
7904 } else if (cmp > 0) {
7905 advance_right = ADVANCE;
7906 } else {
7907 left_blockptr = btrfs_node_blockptr(
7908 left_path->nodes[left_level],
7909 left_path->slots[left_level]);
7910 right_blockptr = btrfs_node_blockptr(
7911 right_path->nodes[right_level],
7912 right_path->slots[right_level]);
7913 left_gen = btrfs_node_ptr_generation(
7914 left_path->nodes[left_level],
7915 left_path->slots[left_level]);
7916 right_gen = btrfs_node_ptr_generation(
7917 right_path->nodes[right_level],
7918 right_path->slots[right_level]);
7919 if (left_blockptr == right_blockptr &&
7920 left_gen == right_gen) {
7921 /*
7922 * As we're on a shared block, don't
7923 * allow to go deeper.
7924 */
7925 advance_left = ADVANCE_ONLY_NEXT;
7926 advance_right = ADVANCE_ONLY_NEXT;
7927 } else {
7928 advance_left = ADVANCE;
7929 advance_right = ADVANCE;
7930 }
7931 }
7932 } else if (left_level < right_level) {
7933 advance_right = ADVANCE;
7934 } else {
7935 advance_left = ADVANCE;
7936 }
7937 }
7939 out_unlock:
7940 up_read(&fs_info->commit_root_sem);
7941 out:
7942 btrfs_free_path(left_path);
7943 btrfs_free_path(right_path);
7944 kvfree(tmp_buf);
7945 return ret;
7946 }
7948 static int send_subvol(struct send_ctx *sctx)
7949 {
7950 int ret;
7952 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7953 ret = send_header(sctx);
7954 if (ret < 0)
7955 goto out;
7956 }
7958 ret = send_subvol_begin(sctx);
7959 if (ret < 0)
7960 goto out;
7962 if (sctx->parent_root) {
7963 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7964 if (ret < 0)
7965 goto out;
7966 ret = finish_inode_if_needed(sctx, 1);
7967 if (ret < 0)
7968 goto out;
7969 } else {
7970 ret = full_send_tree(sctx);
7971 if (ret < 0)
7972 goto out;
7973 }
7975 out:
7976 free_recorded_refs(sctx);
7977 return ret;
7978 }
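/*
 * Editorial summary, not part of the original file: the stream produced by
 * send_subvol() is an optional stream header, a subvol/snapshot begin
 * command, the per-item commands generated through changed_cb() (incremental
 * send) or full_send_tree(), and finally an end command unless
 * BTRFS_SEND_FLAG_OMIT_END_CMD was requested (see btrfs_ioctl_send() below).
 */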
7980 /*
7981 * If orphan cleanup did remove any orphans from a root, it means the tree
7982 * was modified and therefore the commit root is not the same as the current
7983 * root anymore. This is a problem, because send uses the commit root and
7984 * therefore can see inode items that don't exist in the current root anymore,
7985 * and for example make calls to btrfs_iget, which will do tree lookups based
7986 * on the current root and not on the commit root. Those lookups will fail,
7987 * returning a -ESTALE error, and making send fail with that error. So make
7988 * sure a send does not see any orphans we have just removed, and that it will
7989 * see the same inodes regardless of whether a transaction commit happened
7990 * before it started (meaning that the commit root will be the same as the
7991 * current root) or not.
7992 */
7993 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7994 {
7996 struct btrfs_trans_handle *trans = NULL;
7997 int i;
7998 again:
7999 if (sctx->parent_root &&
8000 sctx->parent_root->node != sctx->parent_root->commit_root)
8001 goto commit_trans;
8003 for (i = 0; i < sctx->clone_roots_cnt; i++)
8004 if (sctx->clone_roots[i].root->node !=
8005 sctx->clone_roots[i].root->commit_root)
8006 goto commit_trans;
8008 if (trans)
8009 return btrfs_end_transaction(trans);
8011 return 0;
8013 commit_trans:
8014 /* Use any root, all fs roots will get their commit roots updated. */
8015 if (!trans) {
8016 trans = btrfs_join_transaction(sctx->send_root);
8017 if (IS_ERR(trans))
8018 return PTR_ERR(trans);
8019 goto again;
8020 }
8022 return btrfs_commit_transaction(trans);
8023 }
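/*
 * Editorial note, not part of the original file: on the first mismatch the
 * function joins the running transaction and re-checks via the "again" label;
 * if some root is still not up to date, committing that transaction updates
 * the commit roots of all fs roots at once, which is why any root can be used
 * for the join.
 */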
8025 /*
8026 * Make sure any existing delalloc is flushed for any root used by a send
8027 * operation so that we do not miss any data and we do not race with writeback
8028 * finishing and changing a tree while send is using the tree. This could
8029 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
8030 * a send operation then uses the subvolume.
8031 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
8032 */
8033 static int flush_delalloc_roots(struct send_ctx *sctx)
8034 {
8035 struct btrfs_root *root = sctx->parent_root;
8036 int ret;
8037 int i;
8039 if (root) {
8040 ret = btrfs_start_delalloc_snapshot(root, false);
8041 if (ret)
8042 return ret;
8043 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
8044 }
8046 for (i = 0; i < sctx->clone_roots_cnt; i++) {
8047 root = sctx->clone_roots[i].root;
8048 ret = btrfs_start_delalloc_snapshot(root, false);
8049 if (ret)
8050 return ret;
8051 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
8052 }
8054 return 0;
8055 }
8057 static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
8058 {
8059 spin_lock(&root->root_item_lock);
8060 root->send_in_progress--;
8061 /*
8062 * Not much left to do, we don't know why it's unbalanced and
8063 * can't blindly reset it to 0.
8064 */
8065 if (root->send_in_progress < 0)
8066 btrfs_err(root->fs_info,
8067 "send_in_progress unbalanced %d root %llu",
8068 root->send_in_progress, root->root_key.objectid);
8069 spin_unlock(&root->root_item_lock);
8070 }
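/*
 * Editorial note, not part of the original file: this is the counterpart of
 * the send_in_progress++ increments that btrfs_ioctl_send() below performs,
 * under root_item_lock, for the send root, every clone root and the parent
 * root.
 */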
8072 static void dedupe_in_progress_warn(const struct btrfs_root *root)
8073 {
8074 btrfs_warn_rl(root->fs_info,
8075 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
8076 root->root_key.objectid, root->dedupe_in_progress);
8077 }
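/*
 * Editorial userspace sketch, not part of the original file: this entry point
 * backs the BTRFS_IOC_SEND ioctl, which "btrfs send" drives roughly like this
 * (userspace code, error handling omitted):
 *
 *	struct btrfs_ioctl_send_args args = {};
 *
 *	args.send_fd = out_fd;                // where the stream is written
 *	args.parent_root = parent_root_id;    // 0 for a full send
 *	args.clone_sources = source_ids;      // optional array of root IDs
 *	args.clone_sources_count = n_sources;
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */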
8079 long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
8080 {
8081 int ret = 0;
8082 struct btrfs_root *send_root = BTRFS_I(inode)->root;
8083 struct btrfs_fs_info *fs_info = send_root->fs_info;
8084 struct btrfs_root *clone_root;
8085 struct send_ctx *sctx = NULL;
8086 u32 i;
8087 u64 *clone_sources_tmp = NULL;
8088 int clone_sources_to_rollback = 0;
8089 size_t alloc_size;
8090 int sort_clone_roots = 0;
8091 struct btrfs_lru_cache_entry *entry;
8092 struct btrfs_lru_cache_entry *tmp;
8094 if (!capable(CAP_SYS_ADMIN))
8095 return -EPERM;
8097 /*
8098 * The subvolume must remain read-only during send, protect against
8099 * making it RW. This also protects against deletion.
8100 */
8101 spin_lock(&send_root->root_item_lock);
8102 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
8103 dedupe_in_progress_warn(send_root);
8104 spin_unlock(&send_root->root_item_lock);
8105 return -EAGAIN;
8106 }
8107 send_root->send_in_progress++;
8108 spin_unlock(&send_root->root_item_lock);
8110 /*
8111 * Userspace tools do the checks and warn the user if it's
8112 * not RO.
8113 */
8114 if (!btrfs_root_readonly(send_root)) {
8115 ret = -EPERM;
8116 goto out;
8117 }
8119 /*
8120 * Check that we don't overflow at later allocations, we request
8121 * clone_sources_count + 1 items, and compare to unsigned long inside
8122 * access_ok. Also set an upper limit for allocation size so this can't
8123 * easily exhaust memory. Max number of clone sources is about 200K.
8124 */
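/*
 * Editorial worked example, not part of the original file, assuming
 * sizeof(struct clone_root) is 40 bytes on x86_64: SZ_8M / 40 = 209715
 * entries, which is where the "about 200K" figure above comes from.
 */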
8125 if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
8126 ret = -EINVAL;
8127 goto out;
8128 }
8130 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
8131 ret = -EINVAL;
8132 goto out;
8133 }
8135 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
8136 if (!sctx) {
8137 ret = -ENOMEM;
8138 goto out;
8139 }
8141 INIT_LIST_HEAD(&sctx->new_refs);
8142 INIT_LIST_HEAD(&sctx->deleted_refs);
8144 btrfs_lru_cache_init(&sctx->name_cache, SEND_MAX_NAME_CACHE_SIZE);
8145 btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);
8146 btrfs_lru_cache_init(&sctx->dir_created_cache,
8147 SEND_MAX_DIR_CREATED_CACHE_SIZE);
8148 /*
8149 * This cache is periodically trimmed to a fixed size elsewhere, see
8150 * cache_dir_utimes() and trim_dir_utimes_cache().
8151 */
8152 btrfs_lru_cache_init(&sctx->dir_utimes_cache, 0);
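/*
 * Editorial note, not part of the original file: a maximum size of 0 means
 * this LRU cache never evicts on its own, so its size is bounded only by the
 * explicit trimming mentioned above.
 */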
8154 sctx->pending_dir_moves = RB_ROOT;
8155 sctx->waiting_dir_moves = RB_ROOT;
8156 sctx->orphan_dirs = RB_ROOT;
8157 sctx->rbtree_new_refs = RB_ROOT;
8158 sctx->rbtree_deleted_refs = RB_ROOT;
8160 sctx->flags = arg->flags;
8162 if (arg->flags & BTRFS_SEND_FLAG_VERSION) {
8163 if (arg->version > BTRFS_SEND_STREAM_VERSION) {
8164 ret = -EPROTO;
8165 goto out;
8166 }
8167 /* Zero means "use the highest version" */
8168 sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION;
8169 } else {
8170 sctx->proto = 1;
8171 }
8172 if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) {
8173 ret = -EINVAL;
8174 goto out;
8175 }
8177 sctx->send_filp = fget(arg->send_fd);
8178 if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
8179 ret = -EBADF;
8180 goto out;
8181 }
8183 sctx->send_root = send_root;
8184 /*
8185 * Unlikely but possible, if the subvolume is marked for deletion but
8186 * is slow to remove the directory entry, send can still be started.
8187 */
8188 if (btrfs_root_dead(sctx->send_root)) {
8189 ret = -EPERM;
8190 goto out;
8191 }
8193 sctx->clone_roots_cnt = arg->clone_sources_count;
8195 if (sctx->proto >= 2) {
8196 u32 send_buf_num_pages;
8198 sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V2;
8199 sctx->send_buf = vmalloc(sctx->send_max_size);
8200 if (!sctx->send_buf) {
8201 ret = -ENOMEM;
8202 goto out;
8203 }
8204 send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
8205 sctx->send_buf_pages = kcalloc(send_buf_num_pages,
8206 sizeof(*sctx->send_buf_pages),
8207 GFP_KERNEL);
8208 if (!sctx->send_buf_pages) {
8209 ret = -ENOMEM;
8210 goto out;
8211 }
8212 for (i = 0; i < send_buf_num_pages; i++) {
8213 sctx->send_buf_pages[i] =
8214 vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT));
8215 }
8216 } else {
8217 sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
8218 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
8219 }
8220 if (!sctx->send_buf) {
8221 ret = -ENOMEM;
8222 goto out;
8223 }
8225 sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
8226 sizeof(*sctx->clone_roots),
8227 GFP_KERNEL);
8228 if (!sctx->clone_roots) {
8229 ret = -ENOMEM;
8230 goto out;
8231 }
8233 alloc_size = array_size(sizeof(*arg->clone_sources),
8234 arg->clone_sources_count);
8236 if (arg->clone_sources_count) {
8237 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
8238 if (!clone_sources_tmp) {
8239 ret = -ENOMEM;
8240 goto out;
8241 }
8243 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
8244 alloc_size);
8245 if (ret) {
8246 ret = -EFAULT;
8247 goto out;
8248 }
8250 for (i = 0; i < arg->clone_sources_count; i++) {
8251 clone_root = btrfs_get_fs_root(fs_info,
8252 clone_sources_tmp[i], true);
8253 if (IS_ERR(clone_root)) {
8254 ret = PTR_ERR(clone_root);
8255 goto out;
8256 }
8257 spin_lock(&clone_root->root_item_lock);
8258 if (!btrfs_root_readonly(clone_root) ||
8259 btrfs_root_dead(clone_root)) {
8260 spin_unlock(&clone_root->root_item_lock);
8261 btrfs_put_root(clone_root);
8262 ret = -EPERM;
8263 goto out;
8264 }
8265 if (clone_root->dedupe_in_progress) {
8266 dedupe_in_progress_warn(clone_root);
8267 spin_unlock(&clone_root->root_item_lock);
8268 btrfs_put_root(clone_root);
8269 ret = -EAGAIN;
8270 goto out;
8271 }
8272 clone_root->send_in_progress++;
8273 spin_unlock(&clone_root->root_item_lock);
8275 sctx->clone_roots[i].root = clone_root;
8276 clone_sources_to_rollback = i + 1;
8277 }
8278 kvfree(clone_sources_tmp);
8279 clone_sources_tmp = NULL;
8280 }
8282 if (arg->parent_root) {
8283 sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
8284 true);
8285 if (IS_ERR(sctx->parent_root)) {
8286 ret = PTR_ERR(sctx->parent_root);
8287 goto out;
8288 }
8290 spin_lock(&sctx->parent_root->root_item_lock);
8291 sctx->parent_root->send_in_progress++;
8292 if (!btrfs_root_readonly(sctx->parent_root) ||
8293 btrfs_root_dead(sctx->parent_root)) {
8294 spin_unlock(&sctx->parent_root->root_item_lock);
8295 ret = -EPERM;
8296 goto out;
8297 }
8298 if (sctx->parent_root->dedupe_in_progress) {
8299 dedupe_in_progress_warn(sctx->parent_root);
8300 spin_unlock(&sctx->parent_root->root_item_lock);
8301 ret = -EAGAIN;
8302 goto out;
8303 }
8304 spin_unlock(&sctx->parent_root->root_item_lock);
8305 }
8307 /*
8308 * Clones from send_root are allowed, but only if the clone source
8309 * is behind the current send position. This is checked while searching
8310 * for possible clone sources.
8311 */
8312 sctx->clone_roots[sctx->clone_roots_cnt++].root =
8313 btrfs_grab_root(sctx->send_root);
8315 /* We do a bsearch later */
8316 sort(sctx->clone_roots, sctx->clone_roots_cnt,
8317 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
8318 NULL);
8319 sort_clone_roots = 1;
8321 ret = flush_delalloc_roots(sctx);
8322 if (ret)
8323 goto out;
8325 ret = ensure_commit_roots_uptodate(sctx);
8326 if (ret)
8327 goto out;
8329 ret = send_subvol(sctx);
8330 if (ret < 0)
8331 goto out;
8333 btrfs_lru_cache_for_each_entry_safe(&sctx->dir_utimes_cache, entry, tmp) {
8334 ret = send_utimes(sctx, entry->key, entry->gen);
8335 if (ret < 0)
8336 goto out;
8337 btrfs_lru_cache_remove(&sctx->dir_utimes_cache, entry);
8338 }
8340 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
8341 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
8342 if (ret < 0)
8343 goto out;
8344 ret = send_cmd(sctx);
8345 if (ret < 0)
8346 goto out;
8347 }
8349 out:
8350 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
8351 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
8352 struct rb_node *n;
8353 struct pending_dir_move *pm;
8355 n = rb_first(&sctx->pending_dir_moves);
8356 pm = rb_entry(n, struct pending_dir_move, node);
8357 while (!list_empty(&pm->list)) {
8358 struct pending_dir_move *pm2;
8360 pm2 = list_first_entry(&pm->list,
8361 struct pending_dir_move, list);
8362 free_pending_move(sctx, pm2);
8363 }
8364 free_pending_move(sctx, pm);
8365 }
8367 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
8368 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
8369 struct rb_node *n;
8370 struct waiting_dir_move *dm;
8372 n = rb_first(&sctx->waiting_dir_moves);
8373 dm = rb_entry(n, struct waiting_dir_move, node);
8374 rb_erase(&dm->node, &sctx->waiting_dir_moves);
8375 kfree(dm);
8376 }
8378 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
8379 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
8380 struct rb_node *n;
8381 struct orphan_dir_info *odi;
8383 n = rb_first(&sctx->orphan_dirs);
8384 odi = rb_entry(n, struct orphan_dir_info, node);
8385 free_orphan_dir_info(sctx, odi);
8386 }
8388 if (sort_clone_roots) {
8389 for (i = 0; i < sctx->clone_roots_cnt; i++) {
8390 btrfs_root_dec_send_in_progress(
8391 sctx->clone_roots[i].root);
8392 btrfs_put_root(sctx->clone_roots[i].root);
8393 }
8394 } else {
8395 for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
8396 btrfs_root_dec_send_in_progress(
8397 sctx->clone_roots[i].root);
8398 btrfs_put_root(sctx->clone_roots[i].root);
8399 }
8401 btrfs_root_dec_send_in_progress(send_root);
8402 }
8403 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
8404 btrfs_root_dec_send_in_progress(sctx->parent_root);
8405 btrfs_put_root(sctx->parent_root);
8406 }
8408 kvfree(clone_sources_tmp);
8410 if (sctx) {
8411 if (sctx->send_filp)
8412 fput(sctx->send_filp);
8414 kvfree(sctx->clone_roots);
8415 kfree(sctx->send_buf_pages);
8416 kvfree(sctx->send_buf);
8417 kvfree(sctx->verity_descriptor);
8419 close_current_inode(sctx);
8421 btrfs_lru_cache_clear(&sctx->name_cache);
8422 btrfs_lru_cache_clear(&sctx->backref_cache);
8423 btrfs_lru_cache_clear(&sctx->dir_created_cache);
8424 btrfs_lru_cache_clear(&sctx->dir_utimes_cache);