2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
45 struct btrfs_path *btrfs_alloc_path(void)
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
71 * reset all the locked nodes in the path to spinning locks.
73 * held is used to keep lockdep happy: when lockdep is enabled,
74 * we set held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely use NULL
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
84 btrfs_set_lock_blocking_rw(held, held_rw);
85 if (held_rw == BTRFS_WRITE_LOCK)
86 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 else if (held_rw == BTRFS_READ_LOCK)
88 held_rw = BTRFS_READ_LOCK_BLOCKING;
90 btrfs_set_path_blocking(p);
92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 if (p->nodes[i] && p->locks[i]) {
94 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_WRITE_LOCK;
97 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 p->locks[i] = BTRFS_READ_LOCK;
103 btrfs_clear_lock_blocking_rw(held, held_rw);
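/*
 * Illustrative sketch, not part of the original source: a typical
 * caller brackets work that may sleep with the two helpers above,
 * e.g.
 *
 *	btrfs_set_path_blocking(path);
 *	ret = some_operation_that_may_sleep();
 *	btrfs_clear_path_blocking(path, NULL, 0);
 *
 * some_operation_that_may_sleep() is just a placeholder; passing
 * NULL/0 means no extra buffer has to be kept blocking for lockdep.
 */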
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
111 btrfs_release_path(p);
112 kmem_cache_free(btrfs_path_cachep, p);
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
119 * It is safe to call this on paths that hold no locks or extent buffers.
121 noinline void btrfs_release_path(struct btrfs_path *p)
125 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
130 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
133 free_extent_buffer(p->nodes[i]);
139 * safely gets a reference on the root node of a tree. A lock
140 * is not taken, so a concurrent writer may put a different node
141 * at the root of the tree. See btrfs_lock_root_node for the
144 * The extent buffer returned by this has a reference taken, so
145 * it won't disappear. It may stop being the root of the tree
146 * at any time because there are no locks held.
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
150 struct extent_buffer *eb;
154 eb = rcu_dereference(root->node);
157 * RCU really hurts here: we could free up the root node because
158 * it was COWed, but we may not get the new root node yet, so do
159 * the inc_not_zero dance, and if that doesn't work then
160 * synchronize_rcu and try again.
162 if (atomic_inc_not_zero(&eb->refs)) {
172 /* loop around taking references on and locking the root node of the
173 * tree until you end up with a lock on the root. A locked buffer
174 * is returned, with a reference held.
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
178 struct extent_buffer *eb;
181 eb = btrfs_root_node(root);
183 if (eb == root->node)
185 btrfs_tree_unlock(eb);
186 free_extent_buffer(eb);
191 /* loop around taking references on and read locking the root node of
192 * the tree until you end up with a read lock on the root. A read
193 * locked buffer is returned, with a reference held.
195 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
197 struct extent_buffer *eb;
200 eb = btrfs_root_node(root);
201 btrfs_tree_read_lock(eb);
202 if (eb == root->node)
204 btrfs_tree_read_unlock(eb);
205 free_extent_buffer(eb);
210 /* cowonly roots (everything not a reference counted cow subvolume) just get
211 * put onto a simple dirty list. transaction.c walks this list to make sure
212 * they get properly updated on disk.
214 static void add_root_to_dirty_list(struct btrfs_root *root)
216 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 spin_lock(&root->fs_info->trans_lock);
221 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 /* Want the extent tree to be the last on the list */
223 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 list_move_tail(&root->dirty_list,
225 &root->fs_info->dirty_cowonly_roots);
227 list_move(&root->dirty_list,
228 &root->fs_info->dirty_cowonly_roots);
230 spin_unlock(&root->fs_info->trans_lock);
234 * used by snapshot creation to make a copy of a root for a tree with
235 * a given objectid. The buffer with the new root node is returned in
236 * cow_ret, and this func returns zero on success or a negative error code.
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 struct btrfs_root *root,
240 struct extent_buffer *buf,
241 struct extent_buffer **cow_ret, u64 new_root_objectid)
243 struct extent_buffer *cow;
246 struct btrfs_disk_key disk_key;
248 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
249 trans->transid != root->fs_info->running_transaction->transid);
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->last_trans);
253 level = btrfs_header_level(buf);
255 btrfs_item_key(buf, &disk_key, 0);
257 btrfs_node_key(buf, &disk_key, 0);
259 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
260 &disk_key, level, buf->start, 0);
264 copy_extent_buffer(cow, buf, 0, 0, cow->len);
265 btrfs_set_header_bytenr(cow, cow->start);
266 btrfs_set_header_generation(cow, trans->transid);
267 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
268 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
269 BTRFS_HEADER_FLAG_RELOC);
270 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
271 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 btrfs_set_header_owner(cow, new_root_objectid);
275 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
278 WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 ret = btrfs_inc_ref(trans, root, cow, 1);
282 ret = btrfs_inc_ref(trans, root, cow, 0);
287 btrfs_mark_buffer_dirty(cow);
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_ROOT_REPLACE,
302 struct tree_mod_move {
307 struct tree_mod_root {
312 struct tree_mod_elem {
314 u64 index; /* shifted logical */
318 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
325 struct btrfs_disk_key key;
328 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 struct tree_mod_move move;
331 /* this is used for op == MOD_LOG_ROOT_REPLACE */
332 struct tree_mod_root old_root;
336 * Pull a new tree mod seq number for our operation.
338 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
340 return atomic64_inc_return(&fs_info->tree_mod_seq);
344 * This adds a new blocker to the tree mod log's blocker list if the @elem
345 * passed does not already have a sequence number set. So when a caller expects
346 * to record tree modifications, it should ensure that elem->seq is zero
347 * before calling btrfs_get_tree_mod_seq.
348 * Returns a fresh, unused tree log modification sequence number, even if no new blocker was added.
351 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
352 struct seq_list *elem)
354 write_lock(&fs_info->tree_mod_log_lock);
356 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
357 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
359 write_unlock(&fs_info->tree_mod_log_lock);
364 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
365 struct seq_list *elem)
367 struct rb_root *tm_root;
368 struct rb_node *node;
369 struct rb_node *next;
370 struct seq_list *cur_elem;
371 struct tree_mod_elem *tm;
372 u64 min_seq = (u64)-1;
373 u64 seq_putting = elem->seq;
378 write_lock(&fs_info->tree_mod_log_lock);
379 list_del(&elem->list);
382 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
383 if (cur_elem->seq < min_seq) {
384 if (seq_putting > cur_elem->seq) {
386 * blocker with lower sequence number exists, we
387 * cannot remove anything from the log
389 write_unlock(&fs_info->tree_mod_log_lock);
392 min_seq = cur_elem->seq;
397 * anything that's lower than the lowest existing (read: blocked)
398 * sequence number can be removed from the tree.
400 tm_root = &fs_info->tree_mod_log;
401 for (node = rb_first(tm_root); node; node = next) {
402 next = rb_next(node);
403 tm = container_of(node, struct tree_mod_elem, node);
404 if (tm->seq >= min_seq)
406 rb_erase(node, tm_root);
409 write_unlock(&fs_info->tree_mod_log_lock);
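/*
 * Illustrative sketch, not part of the original source: a reader that
 * wants a consistent view of past tree state registers a blocker for
 * the duration of its walk:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as the time_seq for tree mod log lookups ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * SEQ_LIST_INIT is assumed to be available here; the important part is
 * that elem.seq starts out as zero, as required above.
 */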
413 * key order of the log:
416 * the index is the shifted logical of the *new* root node for root replace
417 * operations, or the shifted logical of the affected block for all other operations.
420 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
423 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
425 struct rb_root *tm_root;
426 struct rb_node **new;
427 struct rb_node *parent = NULL;
428 struct tree_mod_elem *cur;
432 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
434 tm_root = &fs_info->tree_mod_log;
435 new = &tm_root->rb_node;
437 cur = container_of(*new, struct tree_mod_elem, node);
439 if (cur->index < tm->index)
440 new = &((*new)->rb_left);
441 else if (cur->index > tm->index)
442 new = &((*new)->rb_right);
443 else if (cur->seq < tm->seq)
444 new = &((*new)->rb_left);
445 else if (cur->seq > tm->seq)
446 new = &((*new)->rb_right);
451 rb_link_node(&tm->node, parent, new);
452 rb_insert_color(&tm->node, tm_root);
457 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
458 * returns zero with the tree_mod_log_lock acquired. The caller must hold
459 * this until all tree mod log insertions are recorded in the rb tree and then
460 * write unlock fs_info::tree_mod_log_lock.
462 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
463 struct extent_buffer *eb) {
465 if (list_empty(&(fs_info)->tree_mod_seq_list))
467 if (eb && btrfs_header_level(eb) == 0)
470 write_lock(&fs_info->tree_mod_log_lock);
471 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
472 write_unlock(&fs_info->tree_mod_log_lock);
479 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
480 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
481 struct extent_buffer *eb)
484 if (list_empty(&(fs_info)->tree_mod_seq_list))
486 if (eb && btrfs_header_level(eb) == 0)
492 static struct tree_mod_elem *
493 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
494 enum mod_log_op op, gfp_t flags)
496 struct tree_mod_elem *tm;
498 tm = kzalloc(sizeof(*tm), flags);
502 tm->index = eb->start >> PAGE_CACHE_SHIFT;
503 if (op != MOD_LOG_KEY_ADD) {
504 btrfs_node_key(eb, &tm->key, slot);
505 tm->blockptr = btrfs_node_blockptr(eb, slot);
509 tm->generation = btrfs_node_ptr_generation(eb, slot);
510 RB_CLEAR_NODE(&tm->node);
516 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
517 struct extent_buffer *eb, int slot,
518 enum mod_log_op op, gfp_t flags)
520 struct tree_mod_elem *tm;
523 if (!tree_mod_need_log(fs_info, eb))
526 tm = alloc_tree_mod_elem(eb, slot, op, flags);
530 if (tree_mod_dont_log(fs_info, eb)) {
535 ret = __tree_mod_log_insert(fs_info, tm);
536 write_unlock(&eb->fs_info->tree_mod_log_lock);
544 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
545 struct extent_buffer *eb, int dst_slot, int src_slot,
546 int nr_items, gfp_t flags)
548 struct tree_mod_elem *tm = NULL;
549 struct tree_mod_elem **tm_list = NULL;
554 if (!tree_mod_need_log(fs_info, eb))
557 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
561 tm = kzalloc(sizeof(*tm), flags);
567 tm->index = eb->start >> PAGE_CACHE_SHIFT;
569 tm->move.dst_slot = dst_slot;
570 tm->move.nr_items = nr_items;
571 tm->op = MOD_LOG_MOVE_KEYS;
573 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
574 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
575 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
582 if (tree_mod_dont_log(fs_info, eb))
587 * When we overwrite something during the move, we log these removals.
588 * This can only happen when we move towards the beginning of the
589 * buffer, i.e. dst_slot < src_slot.
591 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
592 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
597 ret = __tree_mod_log_insert(fs_info, tm);
600 write_unlock(&eb->fs_info->tree_mod_log_lock);
605 for (i = 0; i < nr_items; i++) {
606 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
607 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
611 write_unlock(&eb->fs_info->tree_mod_log_lock);
619 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
620 struct tree_mod_elem **tm_list,
626 for (i = nritems - 1; i >= 0; i--) {
627 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
629 for (j = nritems - 1; j > i; j--)
630 rb_erase(&tm_list[j]->node,
631 &fs_info->tree_mod_log);
640 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
641 struct extent_buffer *old_root,
642 struct extent_buffer *new_root, gfp_t flags,
645 struct tree_mod_elem *tm = NULL;
646 struct tree_mod_elem **tm_list = NULL;
651 if (!tree_mod_need_log(fs_info, NULL))
654 if (log_removal && btrfs_header_level(old_root) > 0) {
655 nritems = btrfs_header_nritems(old_root);
656 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
662 for (i = 0; i < nritems; i++) {
663 tm_list[i] = alloc_tree_mod_elem(old_root, i,
664 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
672 tm = kzalloc(sizeof(*tm), flags);
678 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
679 tm->old_root.logical = old_root->start;
680 tm->old_root.level = btrfs_header_level(old_root);
681 tm->generation = btrfs_header_generation(old_root);
682 tm->op = MOD_LOG_ROOT_REPLACE;
684 if (tree_mod_dont_log(fs_info, NULL))
688 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
690 ret = __tree_mod_log_insert(fs_info, tm);
692 write_unlock(&fs_info->tree_mod_log_lock);
701 for (i = 0; i < nritems; i++)
710 static struct tree_mod_elem *
711 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
714 struct rb_root *tm_root;
715 struct rb_node *node;
716 struct tree_mod_elem *cur = NULL;
717 struct tree_mod_elem *found = NULL;
718 u64 index = start >> PAGE_CACHE_SHIFT;
720 read_lock(&fs_info->tree_mod_log_lock);
721 tm_root = &fs_info->tree_mod_log;
722 node = tm_root->rb_node;
724 cur = container_of(node, struct tree_mod_elem, node);
725 if (cur->index < index) {
726 node = node->rb_left;
727 } else if (cur->index > index) {
728 node = node->rb_right;
729 } else if (cur->seq < min_seq) {
730 node = node->rb_left;
731 } else if (!smallest) {
732 /* we want the node with the highest seq */
734 BUG_ON(found->seq > cur->seq);
736 node = node->rb_left;
737 } else if (cur->seq > min_seq) {
738 /* we want the node with the smallest seq */
740 BUG_ON(found->seq < cur->seq);
742 node = node->rb_right;
748 read_unlock(&fs_info->tree_mod_log_lock);
754 * this returns the element from the log with the smallest time sequence
755 * value that's in the log (the oldest log item). any element with a time
756 * sequence lower than min_seq will be ignored.
758 static struct tree_mod_elem *
759 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
762 return __tree_mod_log_search(fs_info, start, min_seq, 1);
766 * this returns the element from the log with the largest time sequence
767 * value that's in the log (the most recent log item). any element with
768 * a time sequence lower than min_seq will be ignored.
770 static struct tree_mod_elem *
771 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
773 return __tree_mod_log_search(fs_info, start, min_seq, 0);
777 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
778 struct extent_buffer *src, unsigned long dst_offset,
779 unsigned long src_offset, int nr_items)
782 struct tree_mod_elem **tm_list = NULL;
783 struct tree_mod_elem **tm_list_add, **tm_list_rem;
787 if (!tree_mod_need_log(fs_info, NULL))
790 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
793 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
798 tm_list_add = tm_list;
799 tm_list_rem = tm_list + nr_items;
800 for (i = 0; i < nr_items; i++) {
801 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
802 MOD_LOG_KEY_REMOVE, GFP_NOFS);
803 if (!tm_list_rem[i]) {
808 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
809 MOD_LOG_KEY_ADD, GFP_NOFS);
810 if (!tm_list_add[i]) {
816 if (tree_mod_dont_log(fs_info, NULL))
820 for (i = 0; i < nr_items; i++) {
821 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
824 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
829 write_unlock(&fs_info->tree_mod_log_lock);
835 for (i = 0; i < nr_items * 2; i++) {
836 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
837 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
841 write_unlock(&fs_info->tree_mod_log_lock);
848 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
849 int dst_offset, int src_offset, int nr_items)
852 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
858 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
859 struct extent_buffer *eb, int slot, int atomic)
863 ret = tree_mod_log_insert_key(fs_info, eb, slot,
865 atomic ? GFP_ATOMIC : GFP_NOFS);
870 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
872 struct tree_mod_elem **tm_list = NULL;
877 if (btrfs_header_level(eb) == 0)
880 if (!tree_mod_need_log(fs_info, NULL))
883 nritems = btrfs_header_nritems(eb);
884 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
888 for (i = 0; i < nritems; i++) {
889 tm_list[i] = alloc_tree_mod_elem(eb, i,
890 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
897 if (tree_mod_dont_log(fs_info, eb))
900 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
901 write_unlock(&eb->fs_info->tree_mod_log_lock);
909 for (i = 0; i < nritems; i++)
917 tree_mod_log_set_root_pointer(struct btrfs_root *root,
918 struct extent_buffer *new_root_node,
922 ret = tree_mod_log_insert_root(root->fs_info, root->node,
923 new_root_node, GFP_NOFS, log_removal);
928 * check if the tree block can be shared by multiple trees
930 int btrfs_block_can_be_shared(struct btrfs_root *root,
931 struct extent_buffer *buf)
934 * Tree blocks not in reference counted trees and tree roots
935 * are never shared. If a block was allocated after the last
936 * snapshot and the block was not allocated by tree relocation,
937 * we know the block is not shared.
939 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
940 buf != root->node && buf != root->commit_root &&
941 (btrfs_header_generation(buf) <=
942 btrfs_root_last_snapshot(&root->root_item) ||
943 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
945 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
946 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
947 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
953 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
954 struct btrfs_root *root,
955 struct extent_buffer *buf,
956 struct extent_buffer *cow,
966 * Backrefs update rules:
968 * Always use full backrefs for extent pointers in tree block
969 * allocated by tree relocation.
971 * If a shared tree block is no longer referenced by its owner
972 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
973 * use full backrefs for extent pointers in tree block.
975 * If a tree block is being relocated
976 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
977 * use full backrefs for extent pointers in the tree block.
978 * The reason for this is that some operations (such as drop tree)
979 * are only allowed for blocks that use full backrefs.
982 if (btrfs_block_can_be_shared(root, buf)) {
983 ret = btrfs_lookup_extent_info(trans, root, buf->start,
984 btrfs_header_level(buf), 1,
990 btrfs_std_error(root->fs_info, ret, NULL);
995 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
996 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
997 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1002 owner = btrfs_header_owner(buf);
1003 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1004 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1007 if ((owner == root->root_key.objectid ||
1008 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1009 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1010 ret = btrfs_inc_ref(trans, root, buf, 1);
1011 BUG_ON(ret); /* -ENOMEM */
1013 if (root->root_key.objectid ==
1014 BTRFS_TREE_RELOC_OBJECTID) {
1015 ret = btrfs_dec_ref(trans, root, buf, 0);
1016 BUG_ON(ret); /* -ENOMEM */
1017 ret = btrfs_inc_ref(trans, root, cow, 1);
1018 BUG_ON(ret); /* -ENOMEM */
1020 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1023 if (root->root_key.objectid ==
1024 BTRFS_TREE_RELOC_OBJECTID)
1025 ret = btrfs_inc_ref(trans, root, cow, 1);
1027 ret = btrfs_inc_ref(trans, root, cow, 0);
1028 BUG_ON(ret); /* -ENOMEM */
1030 if (new_flags != 0) {
1031 int level = btrfs_header_level(buf);
1033 ret = btrfs_set_disk_extent_flags(trans, root,
1036 new_flags, level, 0);
1041 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1042 if (root->root_key.objectid ==
1043 BTRFS_TREE_RELOC_OBJECTID)
1044 ret = btrfs_inc_ref(trans, root, cow, 1);
1046 ret = btrfs_inc_ref(trans, root, cow, 0);
1047 BUG_ON(ret); /* -ENOMEM */
1048 ret = btrfs_dec_ref(trans, root, buf, 1);
1049 BUG_ON(ret); /* -ENOMEM */
1051 clean_tree_block(trans, root->fs_info, buf);
1058 * does the dirty work in cow of a single block. The parent block (if
1059 * supplied) is updated to point to the new cow copy. The new buffer is marked
1060 * dirty and returned locked. If you modify the block it needs to be marked
1063 * search_start -- an allocation hint for the new block
1065 * empty_size -- a hint that you plan on doing more cow. This is the size in
1066 * bytes the allocator should try to find free next to the block it returns.
1067 * This is just a hint and may be ignored by the allocator.
1069 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1070 struct btrfs_root *root,
1071 struct extent_buffer *buf,
1072 struct extent_buffer *parent, int parent_slot,
1073 struct extent_buffer **cow_ret,
1074 u64 search_start, u64 empty_size)
1076 struct btrfs_disk_key disk_key;
1077 struct extent_buffer *cow;
1080 int unlock_orig = 0;
1083 if (*cow_ret == buf)
1086 btrfs_assert_tree_locked(buf);
1088 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1089 trans->transid != root->fs_info->running_transaction->transid);
1090 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1091 trans->transid != root->last_trans);
1093 level = btrfs_header_level(buf);
1096 btrfs_item_key(buf, &disk_key, 0);
1098 btrfs_node_key(buf, &disk_key, 0);
1100 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1102 parent_start = parent->start;
1108 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1109 root->root_key.objectid, &disk_key, level,
1110 search_start, empty_size);
1112 return PTR_ERR(cow);
1114 /* cow is set to blocking by btrfs_init_new_buffer */
1116 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1117 btrfs_set_header_bytenr(cow, cow->start);
1118 btrfs_set_header_generation(cow, trans->transid);
1119 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1120 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1121 BTRFS_HEADER_FLAG_RELOC);
1122 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1123 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1125 btrfs_set_header_owner(cow, root->root_key.objectid);
1127 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1130 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1132 btrfs_tree_unlock(cow);
1133 free_extent_buffer(cow);
1134 btrfs_abort_transaction(trans, root, ret);
1138 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1139 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1141 btrfs_tree_unlock(cow);
1142 free_extent_buffer(cow);
1143 btrfs_abort_transaction(trans, root, ret);
1148 if (buf == root->node) {
1149 WARN_ON(parent && parent != buf);
1150 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1151 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1152 parent_start = buf->start;
1156 extent_buffer_get(cow);
1157 tree_mod_log_set_root_pointer(root, cow, 1);
1158 rcu_assign_pointer(root->node, cow);
1160 btrfs_free_tree_block(trans, root, buf, parent_start,
1162 free_extent_buffer(buf);
1163 add_root_to_dirty_list(root);
1165 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1166 parent_start = parent->start;
1170 WARN_ON(trans->transid != btrfs_header_generation(parent));
1171 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1172 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1173 btrfs_set_node_blockptr(parent, parent_slot,
1175 btrfs_set_node_ptr_generation(parent, parent_slot,
1177 btrfs_mark_buffer_dirty(parent);
1179 ret = tree_mod_log_free_eb(root->fs_info, buf);
1181 btrfs_tree_unlock(cow);
1182 free_extent_buffer(cow);
1183 btrfs_abort_transaction(trans, root, ret);
1187 btrfs_free_tree_block(trans, root, buf, parent_start,
1191 btrfs_tree_unlock(buf);
1192 free_extent_buffer_stale(buf);
1193 btrfs_mark_buffer_dirty(cow);
1199 * returns the logical address of the oldest predecessor of the given root.
1200 * entries older than time_seq are ignored.
1202 static struct tree_mod_elem *
1203 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1204 struct extent_buffer *eb_root, u64 time_seq)
1206 struct tree_mod_elem *tm;
1207 struct tree_mod_elem *found = NULL;
1208 u64 root_logical = eb_root->start;
1215 * the very last operation that's logged for a root is the replacement
1216 * operation (if it is replaced at all). this has the index of the *new*
1217 * root, making it the very first operation that's logged for this root.
1220 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1225 * if there are no tree operations for the oldest root, we simply
1226 * return it. this should only happen if that (old) root is at level 0.
1233 * if there's an operation that's not a root replacement, we
1234 * found the oldest version of our root. normally, we'll find a
1235 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1237 if (tm->op != MOD_LOG_ROOT_REPLACE)
1241 root_logical = tm->old_root.logical;
1245 /* if there's no old root to return, return what we found instead */
1253 * tm is a pointer to the first operation to rewind within eb. then, all
1254 * previous operations will be rewound (until we reach something older than time_seq).
1258 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1259 u64 time_seq, struct tree_mod_elem *first_tm)
1262 struct rb_node *next;
1263 struct tree_mod_elem *tm = first_tm;
1264 unsigned long o_dst;
1265 unsigned long o_src;
1266 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1268 n = btrfs_header_nritems(eb);
1269 read_lock(&fs_info->tree_mod_log_lock);
1270 while (tm && tm->seq >= time_seq) {
1272 * all the operations are recorded with the operator used for
1273 * the modification. as we're going backwards, we do the
1274 * opposite of each operation here.
1277 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1278 BUG_ON(tm->slot < n);
1280 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1281 case MOD_LOG_KEY_REMOVE:
1282 btrfs_set_node_key(eb, &tm->key, tm->slot);
1283 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1284 btrfs_set_node_ptr_generation(eb, tm->slot,
1288 case MOD_LOG_KEY_REPLACE:
1289 BUG_ON(tm->slot >= n);
1290 btrfs_set_node_key(eb, &tm->key, tm->slot);
1291 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1292 btrfs_set_node_ptr_generation(eb, tm->slot,
1295 case MOD_LOG_KEY_ADD:
1296 /* if a move operation is needed it's in the log */
1299 case MOD_LOG_MOVE_KEYS:
1300 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1301 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1302 memmove_extent_buffer(eb, o_dst, o_src,
1303 tm->move.nr_items * p_size);
1305 case MOD_LOG_ROOT_REPLACE:
1307 * this operation is special. for roots, this must be
1308 * handled explicitly before rewinding.
1309 * for non-roots, this operation may exist if the node
1310 * was a root: root A -> child B; then A becomes empty and
1311 * B is promoted to the new root. in the mod log, we'll
1312 * have a root-replace operation for B, a tree block
1313 * that is not a root. we simply ignore that operation.
1317 next = rb_next(&tm->node);
1320 tm = container_of(next, struct tree_mod_elem, node);
1321 if (tm->index != first_tm->index)
1324 read_unlock(&fs_info->tree_mod_log_lock);
1325 btrfs_set_header_nritems(eb, n);
1329 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1330 * is returned. If rewind operations happen, a fresh buffer is returned. The
1331 * returned buffer is always read-locked. If the returned buffer is not the
1332 * input buffer, the lock on the input buffer is released and the input buffer
1333 * is freed (its refcount is decremented).
1335 static struct extent_buffer *
1336 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1337 struct extent_buffer *eb, u64 time_seq)
1339 struct extent_buffer *eb_rewin;
1340 struct tree_mod_elem *tm;
1345 if (btrfs_header_level(eb) == 0)
1348 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1352 btrfs_set_path_blocking(path);
1353 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1355 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1356 BUG_ON(tm->slot != 0);
1357 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1359 btrfs_tree_read_unlock_blocking(eb);
1360 free_extent_buffer(eb);
1363 btrfs_set_header_bytenr(eb_rewin, eb->start);
1364 btrfs_set_header_backref_rev(eb_rewin,
1365 btrfs_header_backref_rev(eb));
1366 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1367 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1369 eb_rewin = btrfs_clone_extent_buffer(eb);
1371 btrfs_tree_read_unlock_blocking(eb);
1372 free_extent_buffer(eb);
1377 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1378 btrfs_tree_read_unlock_blocking(eb);
1379 free_extent_buffer(eb);
1381 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
1382 eb_rewin, btrfs_header_level(eb_rewin));
1383 btrfs_tree_read_lock(eb_rewin);
1384 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1385 WARN_ON(btrfs_header_nritems(eb_rewin) >
1386 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1392 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1393 * value. If there are no changes, the current root->node is returned. If
1394 * anything changed in between, there's a fresh buffer allocated on which the
1395 * rewind operations are done. In any case, the returned buffer is read locked.
1396 * Returns NULL on error (with no locks held).
1398 static inline struct extent_buffer *
1399 get_old_root(struct btrfs_root *root, u64 time_seq)
1401 struct tree_mod_elem *tm;
1402 struct extent_buffer *eb = NULL;
1403 struct extent_buffer *eb_root;
1404 u64 eb_root_owner = 0;
1405 struct extent_buffer *old;
1406 struct tree_mod_root *old_root = NULL;
1407 u64 old_generation = 0;
1410 eb_root = btrfs_read_lock_root_node(root);
1411 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1415 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1416 old_root = &tm->old_root;
1417 old_generation = tm->generation;
1418 logical = old_root->logical;
1420 logical = eb_root->start;
1423 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1424 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1425 btrfs_tree_read_unlock(eb_root);
1426 free_extent_buffer(eb_root);
1427 old = read_tree_block(root, logical, 0);
1428 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1430 free_extent_buffer(old);
1431 btrfs_warn(root->fs_info,
1432 "failed to read tree block %llu from get_old_root", logical);
1434 btrfs_tree_read_lock(old);
1435 eb = btrfs_clone_extent_buffer(old);
1436 btrfs_tree_read_unlock(old);
1437 free_extent_buffer(old);
1439 } else if (old_root) {
1440 eb_root_owner = btrfs_header_owner(eb_root);
1441 btrfs_tree_read_unlock(eb_root);
1442 free_extent_buffer(eb_root);
1443 eb = alloc_dummy_extent_buffer(root->fs_info, logical);
1445 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1446 eb = btrfs_clone_extent_buffer(eb_root);
1447 btrfs_tree_read_unlock_blocking(eb_root);
1448 free_extent_buffer(eb_root);
1454 btrfs_set_header_bytenr(eb, eb->start);
1455 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1456 btrfs_set_header_owner(eb, eb_root_owner);
1457 btrfs_set_header_level(eb, old_root->level);
1458 btrfs_set_header_generation(eb, old_generation);
1460 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
1461 btrfs_header_level(eb));
1462 btrfs_tree_read_lock(eb);
1464 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1466 WARN_ON(btrfs_header_level(eb) != 0);
1467 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
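/*
 * Illustrative sketch, not part of the original source: while a tree
 * mod seq blocker is held, a historical view of the tree can be
 * obtained with
 *
 *	eb = get_old_root(root, time_seq);
 *	level = btrfs_old_root_level(root, time_seq);
 *
 * where time_seq is the sequence number returned by
 * btrfs_get_tree_mod_seq(). Both calls are only meaningful while that
 * blocker is still registered.
 */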
1472 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1474 struct tree_mod_elem *tm;
1476 struct extent_buffer *eb_root = btrfs_root_node(root);
1478 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1479 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1480 level = tm->old_root.level;
1482 level = btrfs_header_level(eb_root);
1484 free_extent_buffer(eb_root);
1489 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1490 struct btrfs_root *root,
1491 struct extent_buffer *buf)
1493 if (btrfs_test_is_dummy_root(root))
1496 /* ensure we can see the force_cow */
1500 * We do not need to cow a block if
1501 * 1) this block is not created or changed in this transaction;
1502 * 2) this block does not belong to TREE_RELOC tree;
1503 * 3) the root is not forced COW.
1505 * What is forced COW:
1506 * when we create a snapshot while committing the transaction,
1507 * after we've finished copying the src root, we must COW the shared
1508 * block to ensure metadata consistency.
1510 if (btrfs_header_generation(buf) == trans->transid &&
1511 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1512 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1513 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1514 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1520 * cows a single block, see __btrfs_cow_block for the real work.
1521 * This version of it has extra checks so that a block isn't cow'd more than
1522 * once per transaction, as long as it hasn't been written yet
1524 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1525 struct btrfs_root *root, struct extent_buffer *buf,
1526 struct extent_buffer *parent, int parent_slot,
1527 struct extent_buffer **cow_ret)
1532 if (trans->transaction != root->fs_info->running_transaction)
1533 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1535 root->fs_info->running_transaction->transid);
1537 if (trans->transid != root->fs_info->generation)
1538 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1539 trans->transid, root->fs_info->generation);
1541 if (!should_cow_block(trans, root, buf)) {
1542 trans->dirty = true;
1547 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1550 btrfs_set_lock_blocking(parent);
1551 btrfs_set_lock_blocking(buf);
1553 ret = __btrfs_cow_block(trans, root, buf, parent,
1554 parent_slot, cow_ret, search_start, 0);
1556 trace_btrfs_cow_block(root, buf, *cow_ret);
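/*
 * Illustrative sketch, not part of the original source: callers COW the
 * block they are about to modify and then keep working on the returned
 * copy, e.g.
 *
 *	ret = btrfs_cow_block(trans, root, eb, parent, slot, &eb);
 *	if (ret)
 *		return ret;
 *	... modify eb ...
 *
 * which mirrors how balance_level() and push_nodes_for_insert() below
 * use this helper.
 */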
1562 * helper function for defrag to decide if two blocks pointed to by a
1563 * node are actually close by
1565 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1567 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1569 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1575 * compare two keys in a memcmp fashion
1577 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1579 struct btrfs_key k1;
1581 btrfs_disk_key_to_cpu(&k1, disk);
1583 return btrfs_comp_cpu_keys(&k1, k2);
1587 * same as comp_keys only with two btrfs_key's
1589 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1591 if (k1->objectid > k2->objectid)
1593 if (k1->objectid < k2->objectid)
1595 if (k1->type > k2->type)
1597 if (k1->type < k2->type)
1599 if (k1->offset > k2->offset)
1601 if (k1->offset < k2->offset)
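/*
 * Illustrative example, not part of the original source: keys order
 * lexicographically by (objectid, type, offset), so for instance
 *
 *	{ .objectid = 256, .type = 1, .offset = 0 }
 *
 * sorts before
 *
 *	{ .objectid = 256, .type = 84, .offset = 0 }
 *
 * because the types differ, regardless of the offsets involved.
 */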
1607 * this is used by the defrag code to go through all the
1608 * leaves pointed to by a node and reallocate them so that
1609 * disk order is close to key order
1611 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1612 struct btrfs_root *root, struct extent_buffer *parent,
1613 int start_slot, u64 *last_ret,
1614 struct btrfs_key *progress)
1616 struct extent_buffer *cur;
1619 u64 search_start = *last_ret;
1629 int progress_passed = 0;
1630 struct btrfs_disk_key disk_key;
1632 parent_level = btrfs_header_level(parent);
1634 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1635 WARN_ON(trans->transid != root->fs_info->generation);
1637 parent_nritems = btrfs_header_nritems(parent);
1638 blocksize = root->nodesize;
1639 end_slot = parent_nritems - 1;
1641 if (parent_nritems <= 1)
1644 btrfs_set_lock_blocking(parent);
1646 for (i = start_slot; i <= end_slot; i++) {
1649 btrfs_node_key(parent, &disk_key, i);
1650 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1653 progress_passed = 1;
1654 blocknr = btrfs_node_blockptr(parent, i);
1655 gen = btrfs_node_ptr_generation(parent, i);
1656 if (last_block == 0)
1657 last_block = blocknr;
1660 other = btrfs_node_blockptr(parent, i - 1);
1661 close = close_blocks(blocknr, other, blocksize);
1663 if (!close && i < end_slot) {
1664 other = btrfs_node_blockptr(parent, i + 1);
1665 close = close_blocks(blocknr, other, blocksize);
1668 last_block = blocknr;
1672 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1674 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1677 if (!cur || !uptodate) {
1679 cur = read_tree_block(root, blocknr, gen);
1681 return PTR_ERR(cur);
1682 } else if (!extent_buffer_uptodate(cur)) {
1683 free_extent_buffer(cur);
1686 } else if (!uptodate) {
1687 err = btrfs_read_buffer(cur, gen);
1689 free_extent_buffer(cur);
1694 if (search_start == 0)
1695 search_start = last_block;
1697 btrfs_tree_lock(cur);
1698 btrfs_set_lock_blocking(cur);
1699 err = __btrfs_cow_block(trans, root, cur, parent, i,
1702 (end_slot - i) * blocksize));
1704 btrfs_tree_unlock(cur);
1705 free_extent_buffer(cur);
1708 search_start = cur->start;
1709 last_block = cur->start;
1710 *last_ret = search_start;
1711 btrfs_tree_unlock(cur);
1712 free_extent_buffer(cur);
1719 * search for key in the extent_buffer. The items start at offset p,
1720 * and they are item_size apart. There are 'max' items in p.
1722 * the slot in the array is returned via slot, and it points to
1723 * the place where you would insert key if it is not found in the array.
1726 * slot may point to max if the key is bigger than all of the keys currently in the array.
1728 static noinline int generic_bin_search(struct extent_buffer *eb,
1730 int item_size, struct btrfs_key *key,
1737 struct btrfs_disk_key *tmp = NULL;
1738 struct btrfs_disk_key unaligned;
1739 unsigned long offset;
1741 unsigned long map_start = 0;
1742 unsigned long map_len = 0;
1745 while (low < high) {
1746 mid = (low + high) / 2;
1747 offset = p + mid * item_size;
1749 if (!kaddr || offset < map_start ||
1750 (offset + sizeof(struct btrfs_disk_key)) >
1751 map_start + map_len) {
1753 err = map_private_extent_buffer(eb, offset,
1754 sizeof(struct btrfs_disk_key),
1755 &kaddr, &map_start, &map_len);
1758 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1761 read_extent_buffer(eb, &unaligned,
1762 offset, sizeof(unaligned));
1767 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1770 ret = comp_keys(tmp, key);
1786 * simple bin_search frontend that does the right thing for leaves vs nodes
1789 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1790 int level, int *slot)
1793 return generic_bin_search(eb,
1794 offsetof(struct btrfs_leaf, items),
1795 sizeof(struct btrfs_item),
1796 key, btrfs_header_nritems(eb),
1799 return generic_bin_search(eb,
1800 offsetof(struct btrfs_node, ptrs),
1801 sizeof(struct btrfs_key_ptr),
1802 key, btrfs_header_nritems(eb),
1806 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1807 int level, int *slot)
1809 return bin_search(eb, key, level, slot);
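/*
 * Illustrative example, not part of the original source: with node keys
 * whose objectids are 1, 3 and 5, searching for 3 finds the key and
 * sets *slot to 1, while searching for 4 does not find it and sets
 * *slot to 2, i.e. the position where 4 would have to be inserted to
 * keep the keys ordered (see the generic_bin_search() comment above).
 */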
1812 static void root_add_used(struct btrfs_root *root, u32 size)
1814 spin_lock(&root->accounting_lock);
1815 btrfs_set_root_used(&root->root_item,
1816 btrfs_root_used(&root->root_item) + size);
1817 spin_unlock(&root->accounting_lock);
1820 static void root_sub_used(struct btrfs_root *root, u32 size)
1822 spin_lock(&root->accounting_lock);
1823 btrfs_set_root_used(&root->root_item,
1824 btrfs_root_used(&root->root_item) - size);
1825 spin_unlock(&root->accounting_lock);
1828 /* given a node and slot number, this reads the block it points to. The
1829 * extent buffer is returned with a reference taken (but unlocked).
1830 * NULL is returned on error.
1832 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1833 struct extent_buffer *parent, int slot)
1835 int level = btrfs_header_level(parent);
1836 struct extent_buffer *eb;
1840 if (slot >= btrfs_header_nritems(parent))
1845 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1846 btrfs_node_ptr_generation(parent, slot));
1847 if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
1849 free_extent_buffer(eb);
1857 * node level balancing, used to make sure nodes are in proper order for
1858 * item deletion. We balance from the top down, so we have to make sure
1859 * that a deletion won't leave a node completely empty later on.
1861 static noinline int balance_level(struct btrfs_trans_handle *trans,
1862 struct btrfs_root *root,
1863 struct btrfs_path *path, int level)
1865 struct extent_buffer *right = NULL;
1866 struct extent_buffer *mid;
1867 struct extent_buffer *left = NULL;
1868 struct extent_buffer *parent = NULL;
1872 int orig_slot = path->slots[level];
1878 mid = path->nodes[level];
1880 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1881 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1882 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1884 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1886 if (level < BTRFS_MAX_LEVEL - 1) {
1887 parent = path->nodes[level + 1];
1888 pslot = path->slots[level + 1];
1892 * deal with the case where there is only one pointer in the root
1893 * by promoting the node below to a root
1896 struct extent_buffer *child;
1898 if (btrfs_header_nritems(mid) != 1)
1901 /* promote the child to a root */
1902 child = read_node_slot(root, mid, 0);
1905 btrfs_std_error(root->fs_info, ret, NULL);
1909 btrfs_tree_lock(child);
1910 btrfs_set_lock_blocking(child);
1911 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1913 btrfs_tree_unlock(child);
1914 free_extent_buffer(child);
1918 tree_mod_log_set_root_pointer(root, child, 1);
1919 rcu_assign_pointer(root->node, child);
1921 add_root_to_dirty_list(root);
1922 btrfs_tree_unlock(child);
1924 path->locks[level] = 0;
1925 path->nodes[level] = NULL;
1926 clean_tree_block(trans, root->fs_info, mid);
1927 btrfs_tree_unlock(mid);
1928 /* once for the path */
1929 free_extent_buffer(mid);
1931 root_sub_used(root, mid->len);
1932 btrfs_free_tree_block(trans, root, mid, 0, 1);
1933 /* once for the root ptr */
1934 free_extent_buffer_stale(mid);
1937 if (btrfs_header_nritems(mid) >
1938 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1941 left = read_node_slot(root, parent, pslot - 1);
1943 btrfs_tree_lock(left);
1944 btrfs_set_lock_blocking(left);
1945 wret = btrfs_cow_block(trans, root, left,
1946 parent, pslot - 1, &left);
1952 right = read_node_slot(root, parent, pslot + 1);
1954 btrfs_tree_lock(right);
1955 btrfs_set_lock_blocking(right);
1956 wret = btrfs_cow_block(trans, root, right,
1957 parent, pslot + 1, &right);
1964 /* first, try to make some room in the middle buffer */
1966 orig_slot += btrfs_header_nritems(left);
1967 wret = push_node_left(trans, root, left, mid, 1);
1973 * then try to empty the rightmost buffer into the middle
1976 wret = push_node_left(trans, root, mid, right, 1);
1977 if (wret < 0 && wret != -ENOSPC)
1979 if (btrfs_header_nritems(right) == 0) {
1980 clean_tree_block(trans, root->fs_info, right);
1981 btrfs_tree_unlock(right);
1982 del_ptr(root, path, level + 1, pslot + 1);
1983 root_sub_used(root, right->len);
1984 btrfs_free_tree_block(trans, root, right, 0, 1);
1985 free_extent_buffer_stale(right);
1988 struct btrfs_disk_key right_key;
1989 btrfs_node_key(right, &right_key, 0);
1990 tree_mod_log_set_node_key(root->fs_info, parent,
1992 btrfs_set_node_key(parent, &right_key, pslot + 1);
1993 btrfs_mark_buffer_dirty(parent);
1996 if (btrfs_header_nritems(mid) == 1) {
1998 * we're not allowed to leave a node with one item in the
1999 * tree during a delete. A deletion from lower in the tree
2000 * could try to delete the only pointer in this node.
2001 * So, pull some keys from the left.
2002 * There has to be a left pointer at this point because
2003 * otherwise we would have pulled some pointers from the right.
2008 btrfs_std_error(root->fs_info, ret, NULL);
2011 wret = balance_node_right(trans, root, mid, left);
2017 wret = push_node_left(trans, root, left, mid, 1);
2023 if (btrfs_header_nritems(mid) == 0) {
2024 clean_tree_block(trans, root->fs_info, mid);
2025 btrfs_tree_unlock(mid);
2026 del_ptr(root, path, level + 1, pslot);
2027 root_sub_used(root, mid->len);
2028 btrfs_free_tree_block(trans, root, mid, 0, 1);
2029 free_extent_buffer_stale(mid);
2032 /* update the parent key to reflect our changes */
2033 struct btrfs_disk_key mid_key;
2034 btrfs_node_key(mid, &mid_key, 0);
2035 tree_mod_log_set_node_key(root->fs_info, parent,
2037 btrfs_set_node_key(parent, &mid_key, pslot);
2038 btrfs_mark_buffer_dirty(parent);
2041 /* update the path */
2043 if (btrfs_header_nritems(left) > orig_slot) {
2044 extent_buffer_get(left);
2045 /* left was locked after cow */
2046 path->nodes[level] = left;
2047 path->slots[level + 1] -= 1;
2048 path->slots[level] = orig_slot;
2050 btrfs_tree_unlock(mid);
2051 free_extent_buffer(mid);
2054 orig_slot -= btrfs_header_nritems(left);
2055 path->slots[level] = orig_slot;
2058 /* double check we haven't messed things up */
2060 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2064 btrfs_tree_unlock(right);
2065 free_extent_buffer(right);
2068 if (path->nodes[level] != left)
2069 btrfs_tree_unlock(left);
2070 free_extent_buffer(left);
2075 /* Node balancing for insertion. Here we only split or push nodes around
2076 * when they are completely full. This is also done top down, so we
2077 * have to be pessimistic.
2079 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2080 struct btrfs_root *root,
2081 struct btrfs_path *path, int level)
2083 struct extent_buffer *right = NULL;
2084 struct extent_buffer *mid;
2085 struct extent_buffer *left = NULL;
2086 struct extent_buffer *parent = NULL;
2090 int orig_slot = path->slots[level];
2095 mid = path->nodes[level];
2096 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2098 if (level < BTRFS_MAX_LEVEL - 1) {
2099 parent = path->nodes[level + 1];
2100 pslot = path->slots[level + 1];
2106 left = read_node_slot(root, parent, pslot - 1);
2108 /* first, try to make some room in the middle buffer */
2112 btrfs_tree_lock(left);
2113 btrfs_set_lock_blocking(left);
2115 left_nr = btrfs_header_nritems(left);
2116 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2119 ret = btrfs_cow_block(trans, root, left, parent,
2124 wret = push_node_left(trans, root,
2131 struct btrfs_disk_key disk_key;
2132 orig_slot += left_nr;
2133 btrfs_node_key(mid, &disk_key, 0);
2134 tree_mod_log_set_node_key(root->fs_info, parent,
2136 btrfs_set_node_key(parent, &disk_key, pslot);
2137 btrfs_mark_buffer_dirty(parent);
2138 if (btrfs_header_nritems(left) > orig_slot) {
2139 path->nodes[level] = left;
2140 path->slots[level + 1] -= 1;
2141 path->slots[level] = orig_slot;
2142 btrfs_tree_unlock(mid);
2143 free_extent_buffer(mid);
2146 btrfs_header_nritems(left);
2147 path->slots[level] = orig_slot;
2148 btrfs_tree_unlock(left);
2149 free_extent_buffer(left);
2153 btrfs_tree_unlock(left);
2154 free_extent_buffer(left);
2156 right = read_node_slot(root, parent, pslot + 1);
2159 * then try to empty the rightmost buffer into the middle
2164 btrfs_tree_lock(right);
2165 btrfs_set_lock_blocking(right);
2167 right_nr = btrfs_header_nritems(right);
2168 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2171 ret = btrfs_cow_block(trans, root, right,
2177 wret = balance_node_right(trans, root,
2184 struct btrfs_disk_key disk_key;
2186 btrfs_node_key(right, &disk_key, 0);
2187 tree_mod_log_set_node_key(root->fs_info, parent,
2189 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2190 btrfs_mark_buffer_dirty(parent);
2192 if (btrfs_header_nritems(mid) <= orig_slot) {
2193 path->nodes[level] = right;
2194 path->slots[level + 1] += 1;
2195 path->slots[level] = orig_slot -
2196 btrfs_header_nritems(mid);
2197 btrfs_tree_unlock(mid);
2198 free_extent_buffer(mid);
2200 btrfs_tree_unlock(right);
2201 free_extent_buffer(right);
2205 btrfs_tree_unlock(right);
2206 free_extent_buffer(right);
2212 * readahead one full node of leaves, finding things that are close
2213 * to the block in 'slot', and triggering ra on them.
2215 static void reada_for_search(struct btrfs_root *root,
2216 struct btrfs_path *path,
2217 int level, int slot, u64 objectid)
2219 struct extent_buffer *node;
2220 struct btrfs_disk_key disk_key;
2226 int direction = path->reada;
2227 struct extent_buffer *eb;
2235 if (!path->nodes[level])
2238 node = path->nodes[level];
2240 search = btrfs_node_blockptr(node, slot);
2241 blocksize = root->nodesize;
2242 eb = btrfs_find_tree_block(root->fs_info, search);
2244 free_extent_buffer(eb);
2250 nritems = btrfs_header_nritems(node);
2254 if (direction < 0) {
2258 } else if (direction > 0) {
2263 if (path->reada < 0 && objectid) {
2264 btrfs_node_key(node, &disk_key, nr);
2265 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2268 search = btrfs_node_blockptr(node, nr);
2269 if ((search <= target && target - search <= 65536) ||
2270 (search > target && search - target <= 65536)) {
2271 gen = btrfs_node_ptr_generation(node, nr);
2272 readahead_tree_block(root, search);
2276 if ((nread > 65536 || nscan > 32))
2281 static noinline void reada_for_balance(struct btrfs_root *root,
2282 struct btrfs_path *path, int level)
2286 struct extent_buffer *parent;
2287 struct extent_buffer *eb;
2292 parent = path->nodes[level + 1];
2296 nritems = btrfs_header_nritems(parent);
2297 slot = path->slots[level + 1];
2300 block1 = btrfs_node_blockptr(parent, slot - 1);
2301 gen = btrfs_node_ptr_generation(parent, slot - 1);
2302 eb = btrfs_find_tree_block(root->fs_info, block1);
2304 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2305 * don't want to return EAGAIN here. That will loop forever.
2308 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2310 free_extent_buffer(eb);
2312 if (slot + 1 < nritems) {
2313 block2 = btrfs_node_blockptr(parent, slot + 1);
2314 gen = btrfs_node_ptr_generation(parent, slot + 1);
2315 eb = btrfs_find_tree_block(root->fs_info, block2);
2316 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2318 free_extent_buffer(eb);
2322 readahead_tree_block(root, block1);
2324 readahead_tree_block(root, block2);
2329 * when we walk down the tree, it is usually safe to unlock the higher layers
2330 * in the tree. The exceptions are when our path goes through slot 0, because
2331 * operations on the tree might require changing key pointers higher up in the tree.
2334 * callers might also have set path->keep_locks, which tells this code to keep
2335 * the lock if the path points to the last slot in the block. This is part of
2336 * walking through the tree, and selecting the next slot in the higher block.
2338 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2339 * if lowest_unlock is 1, level 0 won't be unlocked
2341 static noinline void unlock_up(struct btrfs_path *path, int level,
2342 int lowest_unlock, int min_write_lock_level,
2343 int *write_lock_level)
2346 int skip_level = level;
2348 struct extent_buffer *t;
2350 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2351 if (!path->nodes[i])
2353 if (!path->locks[i])
2355 if (!no_skips && path->slots[i] == 0) {
2359 if (!no_skips && path->keep_locks) {
2362 nritems = btrfs_header_nritems(t);
2363 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2368 if (skip_level < i && i >= lowest_unlock)
2372 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2373 btrfs_tree_unlock_rw(t, path->locks[i]);
2375 if (write_lock_level &&
2376 i > min_write_lock_level &&
2377 i <= *write_lock_level) {
2378 *write_lock_level = i - 1;
2385 * This releases any locks held in the path starting at level and
2386 * going all the way up to the root.
2388 * btrfs_search_slot will keep the lock held on higher nodes in a few
2389 * corner cases, such as COW of the block at slot zero in the node. This
2390 * ignores those rules, and it should only be called when there are no
2391 * more updates to be done higher up in the tree.
2393 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2397 if (path->keep_locks)
2400 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2401 if (!path->nodes[i])
2403 if (!path->locks[i])
2405 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2411 * helper function for btrfs_search_slot. The goal is to find a block
2412 * in cache without setting the path to blocking. If we find the block
2413 * we return zero and the path is unchanged.
2415 * If we can't find the block, we set the path blocking and do some
2416 * reada. -EAGAIN is returned and the search must be repeated.
2419 read_block_for_search(struct btrfs_trans_handle *trans,
2420 struct btrfs_root *root, struct btrfs_path *p,
2421 struct extent_buffer **eb_ret, int level, int slot,
2422 struct btrfs_key *key, u64 time_seq)
2426 struct extent_buffer *b = *eb_ret;
2427 struct extent_buffer *tmp;
2430 blocknr = btrfs_node_blockptr(b, slot);
2431 gen = btrfs_node_ptr_generation(b, slot);
2433 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2435 /* first we do an atomic uptodate check */
2436 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2441 /* the pages were up to date, but we failed
2442 * the generation number check. Do a full
2443 * read for the generation number that is correct.
2444 * We must do this without dropping locks so
2445 * we can trust our generation number
2447 btrfs_set_path_blocking(p);
2449 /* now we're allowed to do a blocking uptodate check */
2450 ret = btrfs_read_buffer(tmp, gen);
2455 free_extent_buffer(tmp);
2456 btrfs_release_path(p);
2461 * reduce lock contention at high levels
2462 * of the btree by dropping locks before
2463 * we read. Don't release the lock on the current
2464 * level because we need to walk this node to figure
2465 * out which blocks to read.
2467 btrfs_unlock_up_safe(p, level + 1);
2468 btrfs_set_path_blocking(p);
2470 free_extent_buffer(tmp);
2472 reada_for_search(root, p, level, slot, key->objectid);
2475 tmp = read_tree_block(root, blocknr, gen);
2478 * If the read above didn't mark this buffer up to date,
2479 * it will never end up being up to date. Set ret to EIO now
2480 * and give up so that our caller doesn't loop forever
2483 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2485 free_extent_buffer(tmp);
2488 btrfs_release_path(p);
2493 * helper function for btrfs_search_slot. This does all of the checks
2494 * for node-level blocks and does any balancing required based on
2497 * If no extra work was required, zero is returned. If we had to
2498 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2502 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2503 struct btrfs_root *root, struct btrfs_path *p,
2504 struct extent_buffer *b, int level, int ins_len,
2505 int *write_lock_level)
2508 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2509 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2512 if (*write_lock_level < level + 1) {
2513 *write_lock_level = level + 1;
2514 btrfs_release_path(p);
2518 btrfs_set_path_blocking(p);
2519 reada_for_balance(root, p, level);
2520 sret = split_node(trans, root, p, level);
2521 btrfs_clear_path_blocking(p, NULL, 0);
2528 b = p->nodes[level];
2529 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2530 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2533 if (*write_lock_level < level + 1) {
2534 *write_lock_level = level + 1;
2535 btrfs_release_path(p);
2539 btrfs_set_path_blocking(p);
2540 reada_for_balance(root, p, level);
2541 sret = balance_level(trans, root, p, level);
2542 btrfs_clear_path_blocking(p, NULL, 0);
2548 b = p->nodes[level];
2550 btrfs_release_path(p);
2553 BUG_ON(btrfs_header_nritems(b) == 1);
2563 static void key_search_validate(struct extent_buffer *b,
2564 struct btrfs_key *key,
2567 #ifdef CONFIG_BTRFS_ASSERT
2568 struct btrfs_disk_key disk_key;
2570 btrfs_cpu_key_to_disk(&disk_key, key);
2573 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2574 offsetof(struct btrfs_leaf, items[0].key),
2577 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2578 offsetof(struct btrfs_node, ptrs[0].key),
2583 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2584 int level, int *prev_cmp, int *slot)
2586 if (*prev_cmp != 0) {
2587 *prev_cmp = bin_search(b, key, level, slot);
2591 key_search_validate(b, key, level);
2597 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2598 u64 iobjectid, u64 ioff, u8 key_type,
2599 struct btrfs_key *found_key)
2602 struct btrfs_key key;
2603 struct extent_buffer *eb;
2608 key.type = key_type;
2609 key.objectid = iobjectid;
2612 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2616 eb = path->nodes[0];
2617 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2618 ret = btrfs_next_leaf(fs_root, path);
2621 eb = path->nodes[0];
2624 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2625 if (found_key->type != key.type ||
2626 found_key->objectid != key.objectid)
2633 * look for key in the tree. path is filled in with nodes along the way.
2634 * If key is found, we return zero and you can find the item in the leaf
2635 * level of the path (level 0)
2637 * If the key isn't found, the path points to the slot where it should
2638 * be inserted, and 1 is returned. If there are other errors during the
2639 * search a negative error number is returned.
2641 * if ins_len > 0, nodes and leaves will be split as we walk down the
2642 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2645 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2646 *root, struct btrfs_key *key, struct btrfs_path *p, int
2649 struct extent_buffer *b;
2654 int lowest_unlock = 1;
2656 /* everything at write_lock_level or lower must be write locked */
2657 int write_lock_level = 0;
2658 u8 lowest_level = 0;
2659 int min_write_lock_level;
2662 lowest_level = p->lowest_level;
2663 WARN_ON(lowest_level && ins_len > 0);
2664 WARN_ON(p->nodes[0] != NULL);
2665 BUG_ON(!cow && ins_len);
2670 /* when we are removing items, we might have to go up to level
2671 * two as we update tree pointers. Make sure we keep write locks
2672 * for those levels as well
2674 write_lock_level = 2;
2675 } else if (ins_len > 0) {
2677 * for inserting items, make sure we have a write lock on
2678 * level 1 so we can update keys
2680 write_lock_level = 1;
2684 write_lock_level = -1;
2686 if (cow && (p->keep_locks || p->lowest_level))
2687 write_lock_level = BTRFS_MAX_LEVEL;
2689 min_write_lock_level = write_lock_level;
2694 * we try very hard to do read locks on the root
2696 root_lock = BTRFS_READ_LOCK;
2698 if (p->search_commit_root) {
2700 * the commit roots are read only
2701 * so we always do read locks
2703 if (p->need_commit_sem)
2704 down_read(&root->fs_info->commit_root_sem);
2705 b = root->commit_root;
2706 extent_buffer_get(b);
2707 level = btrfs_header_level(b);
2708 if (p->need_commit_sem)
2709 up_read(&root->fs_info->commit_root_sem);
2710 if (!p->skip_locking)
2711 btrfs_tree_read_lock(b);
2713 if (p->skip_locking) {
2714 b = btrfs_root_node(root);
2715 level = btrfs_header_level(b);
2717 /* we don't know the level of the root node
2718 * until we actually have it read locked
2720 b = btrfs_read_lock_root_node(root);
2721 level = btrfs_header_level(b);
2722 if (level <= write_lock_level) {
2723 /* whoops, must trade for write lock */
2724 btrfs_tree_read_unlock(b);
2725 free_extent_buffer(b);
2726 b = btrfs_lock_root_node(root);
2727 root_lock = BTRFS_WRITE_LOCK;
2729 /* the level might have changed, check again */
2730 level = btrfs_header_level(b);
2734 p->nodes[level] = b;
2735 if (!p->skip_locking)
2736 p->locks[level] = root_lock;
2739 level = btrfs_header_level(b);
2742 * setup the path here so we can release it under lock
2743 * contention with the cow code
2746 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2749 * if we don't really need to cow this block
2750 * then we don't want to set the path blocking,
2751 * so we test it here
2753 if (!should_cow_block(trans, root, b)) {
2754 trans->dirty = true;
2759 * must have write locks on this node and the
2762 if (level > write_lock_level ||
2763 (level + 1 > write_lock_level &&
2764 level + 1 < BTRFS_MAX_LEVEL &&
2765 p->nodes[level + 1])) {
2766 write_lock_level = level + 1;
2767 btrfs_release_path(p);
2771 btrfs_set_path_blocking(p);
2773 err = btrfs_cow_block(trans, root, b, NULL, 0,
2776 err = btrfs_cow_block(trans, root, b,
2777 p->nodes[level + 1],
2778 p->slots[level + 1], &b);
2785 p->nodes[level] = b;
2786 btrfs_clear_path_blocking(p, NULL, 0);
2789 * we have a lock on b and as long as we aren't changing
2790 * the tree, there is no way for the items in b to change.
2791 * It is safe to drop the lock on our parent before we
2792 * go through the expensive btree search on b.
2794 * If we're inserting or deleting (ins_len != 0), then we might
2795 * be changing slot zero, which may require changing the parent.
2796 * So, we can't drop the lock until after we know which slot
2797 * we're operating on.
2799 if (!ins_len && !p->keep_locks) {
2802 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2803 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2808 ret = key_search(b, key, level, &prev_cmp, &slot);
2812 if (ret && slot > 0) {
2816 p->slots[level] = slot;
2817 err = setup_nodes_for_search(trans, root, p, b, level,
2818 ins_len, &write_lock_level);
2825 b = p->nodes[level];
2826 slot = p->slots[level];
2829 * slot 0 is special, if we change the key
2830 * we have to update the parent pointer
2831 * which means we must have a write lock
2834 if (slot == 0 && ins_len &&
2835 write_lock_level < level + 1) {
2836 write_lock_level = level + 1;
2837 btrfs_release_path(p);
2841 unlock_up(p, level, lowest_unlock,
2842 min_write_lock_level, &write_lock_level);
2844 if (level == lowest_level) {
2850 err = read_block_for_search(trans, root, p,
2851 &b, level, slot, key, 0);
2859 if (!p->skip_locking) {
2860 level = btrfs_header_level(b);
2861 if (level <= write_lock_level) {
2862 err = btrfs_try_tree_write_lock(b);
2864 btrfs_set_path_blocking(p);
2866 btrfs_clear_path_blocking(p, b,
2869 p->locks[level] = BTRFS_WRITE_LOCK;
2871 err = btrfs_tree_read_lock_atomic(b);
2873 btrfs_set_path_blocking(p);
2874 btrfs_tree_read_lock(b);
2875 btrfs_clear_path_blocking(p, b,
2878 p->locks[level] = BTRFS_READ_LOCK;
2880 p->nodes[level] = b;
2883 p->slots[level] = slot;
2885 btrfs_leaf_free_space(root, b) < ins_len) {
2886 if (write_lock_level < 1) {
2887 write_lock_level = 1;
2888 btrfs_release_path(p);
2892 btrfs_set_path_blocking(p);
2893 err = split_leaf(trans, root, key,
2894 p, ins_len, ret == 0);
2895 btrfs_clear_path_blocking(p, NULL, 0);
2903 if (!p->search_for_split)
2904 unlock_up(p, level, lowest_unlock,
2905 min_write_lock_level, &write_lock_level);
2912 * we don't really know what they plan on doing with the path
2913 * from here on, so for now just mark it as blocking
2915 if (!p->leave_spinning)
2916 btrfs_set_path_blocking(p);
2917 if (ret < 0 && !p->skip_release_on_error)
2918 btrfs_release_path(p);
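/*
 * Illustrative sketch, not part of the original file: a minimal read-only
 * lookup with btrfs_search_slot().  The root pointer and key values are
 * assumed to be supplied by the caller; nothing here is a real on-disk key.
 */
static int __maybe_unused example_lookup_item(struct btrfs_root *root,
					      u64 objectid, u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* no transaction, ins_len == 0 and cow == 0: a pure read-only search */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	/*
	 * ret == 0: exact match, path->nodes[0]/path->slots[0] point at the
	 * item.  ret == 1: not found, the path points at the slot where the
	 * key would be inserted.  ret < 0: a real error such as -EIO.
	 */
	btrfs_free_path(path);
	return ret;
}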
2923 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2924 * current state of the tree together with the operations recorded in the tree
2925 * modification log to search for the key in a previous version of this tree, as
2926 * denoted by the time_seq parameter.
2928 * Naturally, there is no support for insert, delete or cow operations.
2930 * The resulting path and return value will be set up as if we called
2931 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2933 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2934 struct btrfs_path *p, u64 time_seq)
2936 struct extent_buffer *b;
2941 int lowest_unlock = 1;
2942 u8 lowest_level = 0;
2945 lowest_level = p->lowest_level;
2946 WARN_ON(p->nodes[0] != NULL);
2948 if (p->search_commit_root) {
2950 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2954 b = get_old_root(root, time_seq);
2959 level = btrfs_header_level(b);
2960 p->locks[level] = BTRFS_READ_LOCK;
2963 level = btrfs_header_level(b);
2964 p->nodes[level] = b;
2965 btrfs_clear_path_blocking(p, NULL, 0);
2968 * we have a lock on b and as long as we aren't changing
2969 * the tree, there is no way for the items in b to change.
2970 * It is safe to drop the lock on our parent before we
2971 * go through the expensive btree search on b.
2973 btrfs_unlock_up_safe(p, level + 1);
2976 * Since we can unwind eb's we want to do a real search every
2980 ret = key_search(b, key, level, &prev_cmp, &slot);
2984 if (ret && slot > 0) {
2988 p->slots[level] = slot;
2989 unlock_up(p, level, lowest_unlock, 0, NULL);
2991 if (level == lowest_level) {
2997 err = read_block_for_search(NULL, root, p, &b, level,
2998 slot, key, time_seq);
3006 level = btrfs_header_level(b);
3007 err = btrfs_tree_read_lock_atomic(b);
3009 btrfs_set_path_blocking(p);
3010 btrfs_tree_read_lock(b);
3011 btrfs_clear_path_blocking(p, b,
3014 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3019 p->locks[level] = BTRFS_READ_LOCK;
3020 p->nodes[level] = b;
3022 p->slots[level] = slot;
3023 unlock_up(p, level, lowest_unlock, 0, NULL);
3029 if (!p->leave_spinning)
3030 btrfs_set_path_blocking(p);
3032 btrfs_release_path(p);
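/*
 * Illustrative sketch, not part of the original file: looking an item up as
 * it existed at an earlier tree mod log sequence number.  Obtaining time_seq
 * from the tree mod log machinery is assumed to happen elsewhere; only the
 * search itself is shown.
 */
static int __maybe_unused example_old_lookup(struct btrfs_root *root,
					     struct btrfs_key *key, u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* behaves like btrfs_search_slot() with ins_len == 0 and cow == 0 */
	ret = btrfs_search_old_slot(root, key, path, time_seq);

	/* ret == 0: the item existed at time_seq and is at path->slots[0] */
	btrfs_free_path(path);
	return ret;
}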
3038 * helper to use instead of search slot if no exact match is needed but
3039 * instead the next or previous item should be returned.
3040 * When find_higher is true, the next higher item is returned, the next lower
3042 * When return_any and find_higher are both true, and no higher item is found,
3043 * return the next lower instead.
3044 * When return_any is true and find_higher is false, and no lower item is found,
3045 * return the next higher instead.
3046 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3049 int btrfs_search_slot_for_read(struct btrfs_root *root,
3050 struct btrfs_key *key, struct btrfs_path *p,
3051 int find_higher, int return_any)
3054 struct extent_buffer *leaf;
3057 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3061 * a return value of 1 means the path is at the position where the
3062 * item should be inserted. Normally this is the next bigger item,
3063 * but in case the previous item is the last in a leaf, path points
3064 * to the first free slot in the previous leaf, i.e. at an invalid
3070 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3071 ret = btrfs_next_leaf(root, p);
3077 * no higher item found, return the next
3082 btrfs_release_path(p);
3086 if (p->slots[0] == 0) {
3087 ret = btrfs_prev_leaf(root, p);
3092 if (p->slots[0] == btrfs_header_nritems(leaf))
3099 * no lower item found, return the next
3104 btrfs_release_path(p);
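/*
 * Illustrative sketch, not part of the original file: positioning the path on
 * the first item at or after a starting key, falling back to the last lower
 * item when nothing higher exists (find_higher == 1, return_any == 1).  The
 * starting key is assumed to come from the caller.
 */
static int __maybe_unused example_find_next_item(struct btrfs_root *root,
						 struct btrfs_key *start,
						 struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, start, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;	/* 1 means the tree was empty */
}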
3114 * adjust the pointers going up the tree, starting at level
3115 * making sure the right key of each node points to 'key'.
3116 * This is used after shifting pointers to the left, so it stops
3117 * fixing up pointers when a given leaf/node is not in slot 0 of the
3121 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3122 struct btrfs_path *path,
3123 struct btrfs_disk_key *key, int level)
3126 struct extent_buffer *t;
3128 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3129 int tslot = path->slots[i];
3130 if (!path->nodes[i])
3133 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3134 btrfs_set_node_key(t, key, tslot);
3135 btrfs_mark_buffer_dirty(path->nodes[i]);
3144 * This function isn't completely safe. It's the caller's responsibility
3145 * to ensure that the new key won't break the order
3147 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3148 struct btrfs_path *path,
3149 struct btrfs_key *new_key)
3151 struct btrfs_disk_key disk_key;
3152 struct extent_buffer *eb;
3155 eb = path->nodes[0];
3156 slot = path->slots[0];
3158 btrfs_item_key(eb, &disk_key, slot - 1);
3159 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3161 if (slot < btrfs_header_nritems(eb) - 1) {
3162 btrfs_item_key(eb, &disk_key, slot + 1);
3163 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3166 btrfs_cpu_key_to_disk(&disk_key, new_key);
3167 btrfs_set_item_key(eb, &disk_key, slot);
3168 btrfs_mark_buffer_dirty(eb);
3170 fixup_low_keys(fs_info, path, &disk_key, 1);
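/*
 * Illustrative sketch, not part of the original file: bumping the offset of
 * the item the path currently points at (leaf write locked, e.g. after a
 * cowing btrfs_search_slot()).  The new key must keep the leaf sorted, which
 * btrfs_set_item_key_safe() asserts against the neighbouring items.
 */
static void __maybe_unused example_bump_key_offset(struct btrfs_fs_info *fs_info,
						   struct btrfs_path *path,
						   u64 new_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &key);
}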
3174 * try to push data from one node into the next node left in the
3177 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3178 * error, and > 0 if there was no room in the left hand block.
3180 static int push_node_left(struct btrfs_trans_handle *trans,
3181 struct btrfs_root *root, struct extent_buffer *dst,
3182 struct extent_buffer *src, int empty)
3189 src_nritems = btrfs_header_nritems(src);
3190 dst_nritems = btrfs_header_nritems(dst);
3191 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3192 WARN_ON(btrfs_header_generation(src) != trans->transid);
3193 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3195 if (!empty && src_nritems <= 8)
3198 if (push_items <= 0)
3202 push_items = min(src_nritems, push_items);
3203 if (push_items < src_nritems) {
3204 /* leave at least 8 pointers in the node if
3205 * we aren't going to empty it
3207 if (src_nritems - push_items < 8) {
3208 if (push_items <= 8)
3214 push_items = min(src_nritems - 8, push_items);
3216 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3219 btrfs_abort_transaction(trans, root, ret);
3222 copy_extent_buffer(dst, src,
3223 btrfs_node_key_ptr_offset(dst_nritems),
3224 btrfs_node_key_ptr_offset(0),
3225 push_items * sizeof(struct btrfs_key_ptr));
3227 if (push_items < src_nritems) {
3229 * don't call tree_mod_log_eb_move here, key removal was already
3230 * fully logged by tree_mod_log_eb_copy above.
3232 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3233 btrfs_node_key_ptr_offset(push_items),
3234 (src_nritems - push_items) *
3235 sizeof(struct btrfs_key_ptr));
3237 btrfs_set_header_nritems(src, src_nritems - push_items);
3238 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3239 btrfs_mark_buffer_dirty(src);
3240 btrfs_mark_buffer_dirty(dst);
3246 * try to push data from one node into the next node right in the
3249 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3250 * error, and > 0 if there was no room in the right hand block.
3252 * this will only push up to 1/2 the contents of the left node over
3254 static int balance_node_right(struct btrfs_trans_handle *trans,
3255 struct btrfs_root *root,
3256 struct extent_buffer *dst,
3257 struct extent_buffer *src)
3265 WARN_ON(btrfs_header_generation(src) != trans->transid);
3266 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3268 src_nritems = btrfs_header_nritems(src);
3269 dst_nritems = btrfs_header_nritems(dst);
3270 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3271 if (push_items <= 0)
3274 if (src_nritems < 4)
3277 max_push = src_nritems / 2 + 1;
3278 /* don't try to empty the node */
3279 if (max_push >= src_nritems)
3282 if (max_push < push_items)
3283 push_items = max_push;
3285 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3286 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3287 btrfs_node_key_ptr_offset(0),
3289 sizeof(struct btrfs_key_ptr));
3291 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3292 src_nritems - push_items, push_items);
3294 btrfs_abort_transaction(trans, root, ret);
3297 copy_extent_buffer(dst, src,
3298 btrfs_node_key_ptr_offset(0),
3299 btrfs_node_key_ptr_offset(src_nritems - push_items),
3300 push_items * sizeof(struct btrfs_key_ptr));
3302 btrfs_set_header_nritems(src, src_nritems - push_items);
3303 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3305 btrfs_mark_buffer_dirty(src);
3306 btrfs_mark_buffer_dirty(dst);
3312 * helper function to insert a new root level in the tree.
3313 * A new node is allocated, and a single item is inserted to
3314 * point to the existing root
3316 * returns zero on success or < 0 on failure.
3318 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3319 struct btrfs_root *root,
3320 struct btrfs_path *path, int level)
3323 struct extent_buffer *lower;
3324 struct extent_buffer *c;
3325 struct extent_buffer *old;
3326 struct btrfs_disk_key lower_key;
3328 BUG_ON(path->nodes[level]);
3329 BUG_ON(path->nodes[level-1] != root->node);
3331 lower = path->nodes[level-1];
3333 btrfs_item_key(lower, &lower_key, 0);
3335 btrfs_node_key(lower, &lower_key, 0);
3337 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3338 &lower_key, level, root->node->start, 0);
3342 root_add_used(root, root->nodesize);
3344 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3345 btrfs_set_header_nritems(c, 1);
3346 btrfs_set_header_level(c, level);
3347 btrfs_set_header_bytenr(c, c->start);
3348 btrfs_set_header_generation(c, trans->transid);
3349 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3350 btrfs_set_header_owner(c, root->root_key.objectid);
3352 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3355 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3356 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3358 btrfs_set_node_key(c, &lower_key, 0);
3359 btrfs_set_node_blockptr(c, 0, lower->start);
3360 lower_gen = btrfs_header_generation(lower);
3361 WARN_ON(lower_gen != trans->transid);
3363 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3365 btrfs_mark_buffer_dirty(c);
3368 tree_mod_log_set_root_pointer(root, c, 0);
3369 rcu_assign_pointer(root->node, c);
3371 /* the super has an extra ref to root->node */
3372 free_extent_buffer(old);
3374 add_root_to_dirty_list(root);
3375 extent_buffer_get(c);
3376 path->nodes[level] = c;
3377 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3378 path->slots[level] = 0;
3383 * worker function to insert a single pointer in a node.
3384 * the node should have enough room for the pointer already
3386 * slot and level indicate where you want the key to go, and
3387 * blocknr is the block the key points to.
3389 static void insert_ptr(struct btrfs_trans_handle *trans,
3390 struct btrfs_root *root, struct btrfs_path *path,
3391 struct btrfs_disk_key *key, u64 bytenr,
3392 int slot, int level)
3394 struct extent_buffer *lower;
3398 BUG_ON(!path->nodes[level]);
3399 btrfs_assert_tree_locked(path->nodes[level]);
3400 lower = path->nodes[level];
3401 nritems = btrfs_header_nritems(lower);
3402 BUG_ON(slot > nritems);
3403 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3404 if (slot != nritems) {
3406 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3407 slot, nritems - slot);
3408 memmove_extent_buffer(lower,
3409 btrfs_node_key_ptr_offset(slot + 1),
3410 btrfs_node_key_ptr_offset(slot),
3411 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3414 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3415 MOD_LOG_KEY_ADD, GFP_NOFS);
3418 btrfs_set_node_key(lower, key, slot);
3419 btrfs_set_node_blockptr(lower, slot, bytenr);
3420 WARN_ON(trans->transid == 0);
3421 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3422 btrfs_set_header_nritems(lower, nritems + 1);
3423 btrfs_mark_buffer_dirty(lower);
3427 * split the node at the specified level in path in two.
3428 * The path is corrected to point to the appropriate node after the split
3430 * Before splitting this tries to make some room in the node by pushing
3431 * left and right, if either one works, it returns right away.
3433 * returns 0 on success and < 0 on failure
3435 static noinline int split_node(struct btrfs_trans_handle *trans,
3436 struct btrfs_root *root,
3437 struct btrfs_path *path, int level)
3439 struct extent_buffer *c;
3440 struct extent_buffer *split;
3441 struct btrfs_disk_key disk_key;
3446 c = path->nodes[level];
3447 WARN_ON(btrfs_header_generation(c) != trans->transid);
3448 if (c == root->node) {
3450 * trying to split the root, let's make a new one
3452 * tree mod log: We don't log the removal of the old root in
3453 * insert_new_root, because that root buffer will be kept as a
3454 * normal node. We are going to log removal of half of the
3455 * elements below with tree_mod_log_eb_copy. We're holding a
3456 * tree lock on the buffer, which is why we cannot race with
3457 * other tree_mod_log users.
3459 ret = insert_new_root(trans, root, path, level + 1);
3463 ret = push_nodes_for_insert(trans, root, path, level);
3464 c = path->nodes[level];
3465 if (!ret && btrfs_header_nritems(c) <
3466 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3472 c_nritems = btrfs_header_nritems(c);
3473 mid = (c_nritems + 1) / 2;
3474 btrfs_node_key(c, &disk_key, mid);
3476 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3477 &disk_key, level, c->start, 0);
3479 return PTR_ERR(split);
3481 root_add_used(root, root->nodesize);
3483 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3484 btrfs_set_header_level(split, btrfs_header_level(c));
3485 btrfs_set_header_bytenr(split, split->start);
3486 btrfs_set_header_generation(split, trans->transid);
3487 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3488 btrfs_set_header_owner(split, root->root_key.objectid);
3489 write_extent_buffer(split, root->fs_info->fsid,
3490 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3491 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3492 btrfs_header_chunk_tree_uuid(split),
3495 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3496 mid, c_nritems - mid);
3498 btrfs_abort_transaction(trans, root, ret);
3501 copy_extent_buffer(split, c,
3502 btrfs_node_key_ptr_offset(0),
3503 btrfs_node_key_ptr_offset(mid),
3504 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3505 btrfs_set_header_nritems(split, c_nritems - mid);
3506 btrfs_set_header_nritems(c, mid);
3509 btrfs_mark_buffer_dirty(c);
3510 btrfs_mark_buffer_dirty(split);
3512 insert_ptr(trans, root, path, &disk_key, split->start,
3513 path->slots[level + 1] + 1, level + 1);
3515 if (path->slots[level] >= mid) {
3516 path->slots[level] -= mid;
3517 btrfs_tree_unlock(c);
3518 free_extent_buffer(c);
3519 path->nodes[level] = split;
3520 path->slots[level + 1] += 1;
3522 btrfs_tree_unlock(split);
3523 free_extent_buffer(split);
3529 * how many bytes are required to store the items in a leaf. start
3530 * and nr indicate which items in the leaf to check. This totals up the
3531 * space used both by the item structs and the item data
3533 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3535 struct btrfs_item *start_item;
3536 struct btrfs_item *end_item;
3537 struct btrfs_map_token token;
3539 int nritems = btrfs_header_nritems(l);
3540 int end = min(nritems, start + nr) - 1;
3544 btrfs_init_map_token(&token);
3545 start_item = btrfs_item_nr(start);
3546 end_item = btrfs_item_nr(end);
3547 data_len = btrfs_token_item_offset(l, start_item, &token) +
3548 btrfs_token_item_size(l, start_item, &token);
3549 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3550 data_len += sizeof(struct btrfs_item) * nr;
3551 WARN_ON(data_len < 0);
3556 * The space between the end of the leaf items and
3557 * the start of the leaf data. IOW, how much room
3558 * the leaf has left for both items and data
3560 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3561 struct extent_buffer *leaf)
3563 int nritems = btrfs_header_nritems(leaf);
3565 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3567 btrfs_crit(root->fs_info,
3568 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3569 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3570 leaf_space_used(leaf, 0, nritems), nritems);
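/*
 * Illustrative sketch, not part of the original file: the usual check before
 * inserting.  A new item consumes its data bytes plus one struct btrfs_item
 * header, which is exactly the space btrfs_leaf_free_space() reports as
 * available between the item array and the data area.
 */
static bool __maybe_unused example_item_fits(struct btrfs_root *root,
					     struct extent_buffer *leaf,
					     u32 data_size)
{
	int free_space = btrfs_leaf_free_space(root, leaf);

	return free_space >= (int)(data_size + sizeof(struct btrfs_item));
}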
3576 * min slot controls the lowest index we're willing to push to the
3577 * right. We'll push up to and including min_slot, but no lower
3579 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3580 struct btrfs_root *root,
3581 struct btrfs_path *path,
3582 int data_size, int empty,
3583 struct extent_buffer *right,
3584 int free_space, u32 left_nritems,
3587 struct extent_buffer *left = path->nodes[0];
3588 struct extent_buffer *upper = path->nodes[1];
3589 struct btrfs_map_token token;
3590 struct btrfs_disk_key disk_key;
3595 struct btrfs_item *item;
3601 btrfs_init_map_token(&token);
3606 nr = max_t(u32, 1, min_slot);
3608 if (path->slots[0] >= left_nritems)
3609 push_space += data_size;
3611 slot = path->slots[1];
3612 i = left_nritems - 1;
3614 item = btrfs_item_nr(i);
3616 if (!empty && push_items > 0) {
3617 if (path->slots[0] > i)
3619 if (path->slots[0] == i) {
3620 int space = btrfs_leaf_free_space(root, left);
3621 if (space + push_space * 2 > free_space)
3626 if (path->slots[0] == i)
3627 push_space += data_size;
3629 this_item_size = btrfs_item_size(left, item);
3630 if (this_item_size + sizeof(*item) + push_space > free_space)
3634 push_space += this_item_size + sizeof(*item);
3640 if (push_items == 0)
3643 WARN_ON(!empty && push_items == left_nritems);
3645 /* push left to right */
3646 right_nritems = btrfs_header_nritems(right);
3648 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3649 push_space -= leaf_data_end(root, left);
3651 /* make room in the right data area */
3652 data_end = leaf_data_end(root, right);
3653 memmove_extent_buffer(right,
3654 btrfs_leaf_data(right) + data_end - push_space,
3655 btrfs_leaf_data(right) + data_end,
3656 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3658 /* copy from the left data area */
3659 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3660 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3661 btrfs_leaf_data(left) + leaf_data_end(root, left),
3664 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3665 btrfs_item_nr_offset(0),
3666 right_nritems * sizeof(struct btrfs_item));
3668 /* copy the items from left to right */
3669 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3670 btrfs_item_nr_offset(left_nritems - push_items),
3671 push_items * sizeof(struct btrfs_item));
3673 /* update the item pointers */
3674 right_nritems += push_items;
3675 btrfs_set_header_nritems(right, right_nritems);
3676 push_space = BTRFS_LEAF_DATA_SIZE(root);
3677 for (i = 0; i < right_nritems; i++) {
3678 item = btrfs_item_nr(i);
3679 push_space -= btrfs_token_item_size(right, item, &token);
3680 btrfs_set_token_item_offset(right, item, push_space, &token);
3683 left_nritems -= push_items;
3684 btrfs_set_header_nritems(left, left_nritems);
3687 btrfs_mark_buffer_dirty(left);
3689 clean_tree_block(trans, root->fs_info, left);
3691 btrfs_mark_buffer_dirty(right);
3693 btrfs_item_key(right, &disk_key, 0);
3694 btrfs_set_node_key(upper, &disk_key, slot + 1);
3695 btrfs_mark_buffer_dirty(upper);
3697 /* then fixup the leaf pointer in the path */
3698 if (path->slots[0] >= left_nritems) {
3699 path->slots[0] -= left_nritems;
3700 if (btrfs_header_nritems(path->nodes[0]) == 0)
3701 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3702 btrfs_tree_unlock(path->nodes[0]);
3703 free_extent_buffer(path->nodes[0]);
3704 path->nodes[0] = right;
3705 path->slots[1] += 1;
3707 btrfs_tree_unlock(right);
3708 free_extent_buffer(right);
3713 btrfs_tree_unlock(right);
3714 free_extent_buffer(right);
3719 * push some data in the path leaf to the right, trying to free up at
3720 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3722 * returns 1 if the push failed because the other node didn't have enough
3723 * room, 0 if everything worked out and < 0 if there were major errors.
3725 * this will push starting from min_slot to the end of the leaf. It won't
3726 * push any slot lower than min_slot
3728 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3729 *root, struct btrfs_path *path,
3730 int min_data_size, int data_size,
3731 int empty, u32 min_slot)
3733 struct extent_buffer *left = path->nodes[0];
3734 struct extent_buffer *right;
3735 struct extent_buffer *upper;
3741 if (!path->nodes[1])
3744 slot = path->slots[1];
3745 upper = path->nodes[1];
3746 if (slot >= btrfs_header_nritems(upper) - 1)
3749 btrfs_assert_tree_locked(path->nodes[1]);
3751 right = read_node_slot(root, upper, slot + 1);
3755 btrfs_tree_lock(right);
3756 btrfs_set_lock_blocking(right);
3758 free_space = btrfs_leaf_free_space(root, right);
3759 if (free_space < data_size)
3762 /* cow and double check */
3763 ret = btrfs_cow_block(trans, root, right, upper,
3768 free_space = btrfs_leaf_free_space(root, right);
3769 if (free_space < data_size)
3772 left_nritems = btrfs_header_nritems(left);
3773 if (left_nritems == 0)
3776 if (path->slots[0] == left_nritems && !empty) {
3777 /* Key greater than all keys in the leaf, right neighbor has
3778 * enough room for it and we're not emptying our leaf to delete
3779 * it, therefore use right neighbor to insert the new item and
3780 * no need to touch/dirty our left leaf. */
3781 btrfs_tree_unlock(left);
3782 free_extent_buffer(left);
3783 path->nodes[0] = right;
3789 return __push_leaf_right(trans, root, path, min_data_size, empty,
3790 right, free_space, left_nritems, min_slot);
3792 btrfs_tree_unlock(right);
3793 free_extent_buffer(right);
3798 * push some data in the path leaf to the left, trying to free up at
3799 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3801 * max_slot can put a limit on how far into the leaf we'll push items. The
3802 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3805 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3806 struct btrfs_root *root,
3807 struct btrfs_path *path, int data_size,
3808 int empty, struct extent_buffer *left,
3809 int free_space, u32 right_nritems,
3812 struct btrfs_disk_key disk_key;
3813 struct extent_buffer *right = path->nodes[0];
3817 struct btrfs_item *item;
3818 u32 old_left_nritems;
3822 u32 old_left_item_size;
3823 struct btrfs_map_token token;
3825 btrfs_init_map_token(&token);
3828 nr = min(right_nritems, max_slot);
3830 nr = min(right_nritems - 1, max_slot);
3832 for (i = 0; i < nr; i++) {
3833 item = btrfs_item_nr(i);
3835 if (!empty && push_items > 0) {
3836 if (path->slots[0] < i)
3838 if (path->slots[0] == i) {
3839 int space = btrfs_leaf_free_space(root, right);
3840 if (space + push_space * 2 > free_space)
3845 if (path->slots[0] == i)
3846 push_space += data_size;
3848 this_item_size = btrfs_item_size(right, item);
3849 if (this_item_size + sizeof(*item) + push_space > free_space)
3853 push_space += this_item_size + sizeof(*item);
3856 if (push_items == 0) {
3860 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3862 /* push data from right to left */
3863 copy_extent_buffer(left, right,
3864 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3865 btrfs_item_nr_offset(0),
3866 push_items * sizeof(struct btrfs_item));
3868 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3869 btrfs_item_offset_nr(right, push_items - 1);
3871 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3872 leaf_data_end(root, left) - push_space,
3873 btrfs_leaf_data(right) +
3874 btrfs_item_offset_nr(right, push_items - 1),
3876 old_left_nritems = btrfs_header_nritems(left);
3877 BUG_ON(old_left_nritems <= 0);
3879 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3880 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3883 item = btrfs_item_nr(i);
3885 ioff = btrfs_token_item_offset(left, item, &token);
3886 btrfs_set_token_item_offset(left, item,
3887 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3890 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3892 /* fixup right node */
3893 if (push_items > right_nritems)
3894 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3897 if (push_items < right_nritems) {
3898 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3899 leaf_data_end(root, right);
3900 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3901 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3902 btrfs_leaf_data(right) +
3903 leaf_data_end(root, right), push_space);
3905 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3906 btrfs_item_nr_offset(push_items),
3907 (btrfs_header_nritems(right) - push_items) *
3908 sizeof(struct btrfs_item));
3910 right_nritems -= push_items;
3911 btrfs_set_header_nritems(right, right_nritems);
3912 push_space = BTRFS_LEAF_DATA_SIZE(root);
3913 for (i = 0; i < right_nritems; i++) {
3914 item = btrfs_item_nr(i);
3916 push_space = push_space - btrfs_token_item_size(right,
3918 btrfs_set_token_item_offset(right, item, push_space, &token);
3921 btrfs_mark_buffer_dirty(left);
3923 btrfs_mark_buffer_dirty(right);
3925 clean_tree_block(trans, root->fs_info, right);
3927 btrfs_item_key(right, &disk_key, 0);
3928 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3930 /* then fixup the leaf pointer in the path */
3931 if (path->slots[0] < push_items) {
3932 path->slots[0] += old_left_nritems;
3933 btrfs_tree_unlock(path->nodes[0]);
3934 free_extent_buffer(path->nodes[0]);
3935 path->nodes[0] = left;
3936 path->slots[1] -= 1;
3938 btrfs_tree_unlock(left);
3939 free_extent_buffer(left);
3940 path->slots[0] -= push_items;
3942 BUG_ON(path->slots[0] < 0);
3945 btrfs_tree_unlock(left);
3946 free_extent_buffer(left);
3951 * push some data in the path leaf to the left, trying to free up at
3952 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3954 * max_slot can put a limit on how far into the leaf we'll push items. The
3955 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3958 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3959 *root, struct btrfs_path *path, int min_data_size,
3960 int data_size, int empty, u32 max_slot)
3962 struct extent_buffer *right = path->nodes[0];
3963 struct extent_buffer *left;
3969 slot = path->slots[1];
3972 if (!path->nodes[1])
3975 right_nritems = btrfs_header_nritems(right);
3976 if (right_nritems == 0)
3979 btrfs_assert_tree_locked(path->nodes[1]);
3981 left = read_node_slot(root, path->nodes[1], slot - 1);
3985 btrfs_tree_lock(left);
3986 btrfs_set_lock_blocking(left);
3988 free_space = btrfs_leaf_free_space(root, left);
3989 if (free_space < data_size) {
3994 /* cow and double check */
3995 ret = btrfs_cow_block(trans, root, left,
3996 path->nodes[1], slot - 1, &left);
3998 /* we hit -ENOSPC, but it isn't fatal here */
4004 free_space = btrfs_leaf_free_space(root, left);
4005 if (free_space < data_size) {
4010 return __push_leaf_left(trans, root, path, min_data_size,
4011 empty, left, free_space, right_nritems,
4014 btrfs_tree_unlock(left);
4015 free_extent_buffer(left);
4020 * split the path's leaf in two, making sure there is at least data_size
4021 * available for the resulting leaf level of the path.
4023 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4024 struct btrfs_root *root,
4025 struct btrfs_path *path,
4026 struct extent_buffer *l,
4027 struct extent_buffer *right,
4028 int slot, int mid, int nritems)
4033 struct btrfs_disk_key disk_key;
4034 struct btrfs_map_token token;
4036 btrfs_init_map_token(&token);
4038 nritems = nritems - mid;
4039 btrfs_set_header_nritems(right, nritems);
4040 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4042 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4043 btrfs_item_nr_offset(mid),
4044 nritems * sizeof(struct btrfs_item));
4046 copy_extent_buffer(right, l,
4047 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4048 data_copy_size, btrfs_leaf_data(l) +
4049 leaf_data_end(root, l), data_copy_size);
4051 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4052 btrfs_item_end_nr(l, mid);
4054 for (i = 0; i < nritems; i++) {
4055 struct btrfs_item *item = btrfs_item_nr(i);
4058 ioff = btrfs_token_item_offset(right, item, &token);
4059 btrfs_set_token_item_offset(right, item,
4060 ioff + rt_data_off, &token);
4063 btrfs_set_header_nritems(l, mid);
4064 btrfs_item_key(right, &disk_key, 0);
4065 insert_ptr(trans, root, path, &disk_key, right->start,
4066 path->slots[1] + 1, 1);
4068 btrfs_mark_buffer_dirty(right);
4069 btrfs_mark_buffer_dirty(l);
4070 BUG_ON(path->slots[0] != slot);
4073 btrfs_tree_unlock(path->nodes[0]);
4074 free_extent_buffer(path->nodes[0]);
4075 path->nodes[0] = right;
4076 path->slots[0] -= mid;
4077 path->slots[1] += 1;
4079 btrfs_tree_unlock(right);
4080 free_extent_buffer(right);
4083 BUG_ON(path->slots[0] < 0);
4087 * double splits happen when we need to insert a big item in the middle
4088 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4089 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4092 * We avoid this by trying to push the items on either side of our target
4093 * into the adjacent leaves. If all goes well we can avoid the double split
4096 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4097 struct btrfs_root *root,
4098 struct btrfs_path *path,
4105 int space_needed = data_size;
4107 slot = path->slots[0];
4108 if (slot < btrfs_header_nritems(path->nodes[0]))
4109 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4112 * try to push all the items after our slot into the
4115 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4122 nritems = btrfs_header_nritems(path->nodes[0]);
4124 * our goal is to get our slot at the start or end of a leaf. If
4125 * we've done so we're done
4127 if (path->slots[0] == 0 || path->slots[0] == nritems)
4130 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4133 /* try to push all the items before our slot into the next leaf */
4134 slot = path->slots[0];
4135 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4148 * split the path's leaf in two, making sure there is at least data_size
4149 * available for the resulting leaf level of the path.
4151 * returns 0 if all went well and < 0 on failure.
4153 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4154 struct btrfs_root *root,
4155 struct btrfs_key *ins_key,
4156 struct btrfs_path *path, int data_size,
4159 struct btrfs_disk_key disk_key;
4160 struct extent_buffer *l;
4164 struct extent_buffer *right;
4165 struct btrfs_fs_info *fs_info = root->fs_info;
4169 int num_doubles = 0;
4170 int tried_avoid_double = 0;
4173 slot = path->slots[0];
4174 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4175 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4178 /* first try to make some room by pushing left and right */
4179 if (data_size && path->nodes[1]) {
4180 int space_needed = data_size;
4182 if (slot < btrfs_header_nritems(l))
4183 space_needed -= btrfs_leaf_free_space(root, l);
4185 wret = push_leaf_right(trans, root, path, space_needed,
4186 space_needed, 0, 0);
4190 wret = push_leaf_left(trans, root, path, space_needed,
4191 space_needed, 0, (u32)-1);
4197 /* did the pushes work? */
4198 if (btrfs_leaf_free_space(root, l) >= data_size)
4202 if (!path->nodes[1]) {
4203 ret = insert_new_root(trans, root, path, 1);
4210 slot = path->slots[0];
4211 nritems = btrfs_header_nritems(l);
4212 mid = (nritems + 1) / 2;
4216 leaf_space_used(l, mid, nritems - mid) + data_size >
4217 BTRFS_LEAF_DATA_SIZE(root)) {
4218 if (slot >= nritems) {
4222 if (mid != nritems &&
4223 leaf_space_used(l, mid, nritems - mid) +
4224 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4225 if (data_size && !tried_avoid_double)
4226 goto push_for_double;
4232 if (leaf_space_used(l, 0, mid) + data_size >
4233 BTRFS_LEAF_DATA_SIZE(root)) {
4234 if (!extend && data_size && slot == 0) {
4236 } else if ((extend || !data_size) && slot == 0) {
4240 if (mid != nritems &&
4241 leaf_space_used(l, mid, nritems - mid) +
4242 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4243 if (data_size && !tried_avoid_double)
4244 goto push_for_double;
4252 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4254 btrfs_item_key(l, &disk_key, mid);
4256 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4257 &disk_key, 0, l->start, 0);
4259 return PTR_ERR(right);
4261 root_add_used(root, root->nodesize);
4263 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4264 btrfs_set_header_bytenr(right, right->start);
4265 btrfs_set_header_generation(right, trans->transid);
4266 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4267 btrfs_set_header_owner(right, root->root_key.objectid);
4268 btrfs_set_header_level(right, 0);
4269 write_extent_buffer(right, fs_info->fsid,
4270 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4272 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4273 btrfs_header_chunk_tree_uuid(right),
4278 btrfs_set_header_nritems(right, 0);
4279 insert_ptr(trans, root, path, &disk_key, right->start,
4280 path->slots[1] + 1, 1);
4281 btrfs_tree_unlock(path->nodes[0]);
4282 free_extent_buffer(path->nodes[0]);
4283 path->nodes[0] = right;
4285 path->slots[1] += 1;
4287 btrfs_set_header_nritems(right, 0);
4288 insert_ptr(trans, root, path, &disk_key, right->start,
4290 btrfs_tree_unlock(path->nodes[0]);
4291 free_extent_buffer(path->nodes[0]);
4292 path->nodes[0] = right;
4294 if (path->slots[1] == 0)
4295 fixup_low_keys(fs_info, path, &disk_key, 1);
4297 btrfs_mark_buffer_dirty(right);
4301 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4304 BUG_ON(num_doubles != 0);
4312 push_for_double_split(trans, root, path, data_size);
4313 tried_avoid_double = 1;
4314 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4319 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4320 struct btrfs_root *root,
4321 struct btrfs_path *path, int ins_len)
4323 struct btrfs_key key;
4324 struct extent_buffer *leaf;
4325 struct btrfs_file_extent_item *fi;
4330 leaf = path->nodes[0];
4331 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4333 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4334 key.type != BTRFS_EXTENT_CSUM_KEY);
4336 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4339 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4340 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4341 fi = btrfs_item_ptr(leaf, path->slots[0],
4342 struct btrfs_file_extent_item);
4343 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4345 btrfs_release_path(path);
4347 path->keep_locks = 1;
4348 path->search_for_split = 1;
4349 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4350 path->search_for_split = 0;
4357 leaf = path->nodes[0];
4358 /* if our item isn't there, return now */
4359 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4362 /* the leaf has changed, it now has room. return now */
4363 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4366 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4367 fi = btrfs_item_ptr(leaf, path->slots[0],
4368 struct btrfs_file_extent_item);
4369 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4373 btrfs_set_path_blocking(path);
4374 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4378 path->keep_locks = 0;
4379 btrfs_unlock_up_safe(path, 1);
4382 path->keep_locks = 0;
4386 static noinline int split_item(struct btrfs_trans_handle *trans,
4387 struct btrfs_root *root,
4388 struct btrfs_path *path,
4389 struct btrfs_key *new_key,
4390 unsigned long split_offset)
4392 struct extent_buffer *leaf;
4393 struct btrfs_item *item;
4394 struct btrfs_item *new_item;
4400 struct btrfs_disk_key disk_key;
4402 leaf = path->nodes[0];
4403 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4405 btrfs_set_path_blocking(path);
4407 item = btrfs_item_nr(path->slots[0]);
4408 orig_offset = btrfs_item_offset(leaf, item);
4409 item_size = btrfs_item_size(leaf, item);
4411 buf = kmalloc(item_size, GFP_NOFS);
4415 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4416 path->slots[0]), item_size);
4418 slot = path->slots[0] + 1;
4419 nritems = btrfs_header_nritems(leaf);
4420 if (slot != nritems) {
4421 /* shift the items */
4422 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4423 btrfs_item_nr_offset(slot),
4424 (nritems - slot) * sizeof(struct btrfs_item));
4427 btrfs_cpu_key_to_disk(&disk_key, new_key);
4428 btrfs_set_item_key(leaf, &disk_key, slot);
4430 new_item = btrfs_item_nr(slot);
4432 btrfs_set_item_offset(leaf, new_item, orig_offset);
4433 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4435 btrfs_set_item_offset(leaf, item,
4436 orig_offset + item_size - split_offset);
4437 btrfs_set_item_size(leaf, item, split_offset);
4439 btrfs_set_header_nritems(leaf, nritems + 1);
4441 /* write the data for the start of the original item */
4442 write_extent_buffer(leaf, buf,
4443 btrfs_item_ptr_offset(leaf, path->slots[0]),
4446 /* write the data for the new item */
4447 write_extent_buffer(leaf, buf + split_offset,
4448 btrfs_item_ptr_offset(leaf, slot),
4449 item_size - split_offset);
4450 btrfs_mark_buffer_dirty(leaf);
4452 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4458 * This function splits a single item into two items,
4459 * giving 'new_key' to the new item and splitting the
4460 * old one at split_offset (from the start of the item).
4462 * The path may be released by this operation. After
4463 * the split, the path is pointing to the old item. The
4464 * new item is going to be in the same node as the old one.
4466 * Note, the item being split must be small enough to live alone on
4467 * a tree block with room for one extra struct btrfs_item
4469 * This allows us to split the item in place, keeping a lock on the
4470 * leaf the entire time.
4472 int btrfs_split_item(struct btrfs_trans_handle *trans,
4473 struct btrfs_root *root,
4474 struct btrfs_path *path,
4475 struct btrfs_key *new_key,
4476 unsigned long split_offset)
4479 ret = setup_leaf_for_split(trans, root, path,
4480 sizeof(struct btrfs_item));
4484 ret = split_item(trans, root, path, new_key, split_offset);
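/*
 * Illustrative sketch, not part of the original file: splitting an existing
 * item in two at the midpoint of its data.  The path is assumed to have been
 * set up by a prior cowing btrfs_search_slot(), new_key is the key the second
 * half should receive, and the item type must be one setup_leaf_for_split()
 * accepts (extent data or csum items).
 */
static int __maybe_unused example_split_in_half(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						struct btrfs_path *path,
						struct btrfs_key *new_key)
{
	u32 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* the original item keeps the first half, new_key gets the rest */
	return btrfs_split_item(trans, root, path, new_key, item_size / 2);
}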
4489 * This function duplicates an item, giving 'new_key' to the new item.
4490 * It guarantees both items live in the same tree leaf and the new item
4491 * is contiguous with the original item.
4493 * This allows us to split a file extent in place, keeping a lock on the
4494 * leaf the entire time.
4496 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4497 struct btrfs_root *root,
4498 struct btrfs_path *path,
4499 struct btrfs_key *new_key)
4501 struct extent_buffer *leaf;
4505 leaf = path->nodes[0];
4506 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4507 ret = setup_leaf_for_split(trans, root, path,
4508 item_size + sizeof(struct btrfs_item));
4513 setup_items_for_insert(root, path, new_key, &item_size,
4514 item_size, item_size +
4515 sizeof(struct btrfs_item), 1);
4516 leaf = path->nodes[0];
4517 memcpy_extent_buffer(leaf,
4518 btrfs_item_ptr_offset(leaf, path->slots[0]),
4519 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4525 * make the item pointed to by the path smaller. new_size indicates
4526 * how small to make it, and from_end tells us if we just chop bytes
4527 * off the end of the item or if we shift the item to chop bytes off
4530 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4531 u32 new_size, int from_end)
4534 struct extent_buffer *leaf;
4535 struct btrfs_item *item;
4537 unsigned int data_end;
4538 unsigned int old_data_start;
4539 unsigned int old_size;
4540 unsigned int size_diff;
4542 struct btrfs_map_token token;
4544 btrfs_init_map_token(&token);
4546 leaf = path->nodes[0];
4547 slot = path->slots[0];
4549 old_size = btrfs_item_size_nr(leaf, slot);
4550 if (old_size == new_size)
4553 nritems = btrfs_header_nritems(leaf);
4554 data_end = leaf_data_end(root, leaf);
4556 old_data_start = btrfs_item_offset_nr(leaf, slot);
4558 size_diff = old_size - new_size;
4561 BUG_ON(slot >= nritems);
4564 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4566 /* first correct the data pointers */
4567 for (i = slot; i < nritems; i++) {
4569 item = btrfs_item_nr(i);
4571 ioff = btrfs_token_item_offset(leaf, item, &token);
4572 btrfs_set_token_item_offset(leaf, item,
4573 ioff + size_diff, &token);
4576 /* shift the data */
4578 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4579 data_end + size_diff, btrfs_leaf_data(leaf) +
4580 data_end, old_data_start + new_size - data_end);
4582 struct btrfs_disk_key disk_key;
4585 btrfs_item_key(leaf, &disk_key, slot);
4587 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4589 struct btrfs_file_extent_item *fi;
4591 fi = btrfs_item_ptr(leaf, slot,
4592 struct btrfs_file_extent_item);
4593 fi = (struct btrfs_file_extent_item *)(
4594 (unsigned long)fi - size_diff);
4596 if (btrfs_file_extent_type(leaf, fi) ==
4597 BTRFS_FILE_EXTENT_INLINE) {
4598 ptr = btrfs_item_ptr_offset(leaf, slot);
4599 memmove_extent_buffer(leaf, ptr,
4601 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4605 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4606 data_end + size_diff, btrfs_leaf_data(leaf) +
4607 data_end, old_data_start - data_end);
4609 offset = btrfs_disk_key_offset(&disk_key);
4610 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4611 btrfs_set_item_key(leaf, &disk_key, slot);
4613 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4616 item = btrfs_item_nr(slot);
4617 btrfs_set_item_size(leaf, item, new_size);
4618 btrfs_mark_buffer_dirty(leaf);
4620 if (btrfs_leaf_free_space(root, leaf) < 0) {
4621 btrfs_print_leaf(root, leaf);
4627 * make the item pointed to by the path bigger, data_size is the added size.
4629 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4633 struct extent_buffer *leaf;
4634 struct btrfs_item *item;
4636 unsigned int data_end;
4637 unsigned int old_data;
4638 unsigned int old_size;
4640 struct btrfs_map_token token;
4642 btrfs_init_map_token(&token);
4644 leaf = path->nodes[0];
4646 nritems = btrfs_header_nritems(leaf);
4647 data_end = leaf_data_end(root, leaf);
4649 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4650 btrfs_print_leaf(root, leaf);
4653 slot = path->slots[0];
4654 old_data = btrfs_item_end_nr(leaf, slot);
4657 if (slot >= nritems) {
4658 btrfs_print_leaf(root, leaf);
4659 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4665 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4667 /* first correct the data pointers */
4668 for (i = slot; i < nritems; i++) {
4670 item = btrfs_item_nr(i);
4672 ioff = btrfs_token_item_offset(leaf, item, &token);
4673 btrfs_set_token_item_offset(leaf, item,
4674 ioff - data_size, &token);
4677 /* shift the data */
4678 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4679 data_end - data_size, btrfs_leaf_data(leaf) +
4680 data_end, old_data - data_end);
4682 data_end = old_data;
4683 old_size = btrfs_item_size_nr(leaf, slot);
4684 item = btrfs_item_nr(slot);
4685 btrfs_set_item_size(leaf, item, old_size + data_size);
4686 btrfs_mark_buffer_dirty(leaf);
4688 if (btrfs_leaf_free_space(root, leaf) < 0) {
4689 btrfs_print_leaf(root, leaf);
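/*
 * Illustrative sketch, not part of the original file: shrinking and then
 * regrowing an item in place.  The path is assumed to point at the item with
 * the leaf write locked, and the leaf is assumed to have room for the
 * extension (normally reserved through the ins_len argument of
 * btrfs_search_slot()).
 */
static void __maybe_unused example_resize_item(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u32 shrink_to, u32 grow_by)
{
	/* chop bytes off the end, keeping the first shrink_to bytes */
	btrfs_truncate_item(root, path, shrink_to, 1);

	/* then grow the same item again by grow_by bytes at the end */
	btrfs_extend_item(root, path, grow_by);
}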
4695 * this is a helper for btrfs_insert_empty_items, the main goal here is
4696 * to save stack depth by doing the bulk of the work in a function
4697 * that doesn't call btrfs_search_slot
4699 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4700 struct btrfs_key *cpu_key, u32 *data_size,
4701 u32 total_data, u32 total_size, int nr)
4703 struct btrfs_item *item;
4706 unsigned int data_end;
4707 struct btrfs_disk_key disk_key;
4708 struct extent_buffer *leaf;
4710 struct btrfs_map_token token;
4712 if (path->slots[0] == 0) {
4713 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4714 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4716 btrfs_unlock_up_safe(path, 1);
4718 btrfs_init_map_token(&token);
4720 leaf = path->nodes[0];
4721 slot = path->slots[0];
4723 nritems = btrfs_header_nritems(leaf);
4724 data_end = leaf_data_end(root, leaf);
4726 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4727 btrfs_print_leaf(root, leaf);
4728 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4729 total_size, btrfs_leaf_free_space(root, leaf));
4733 if (slot != nritems) {
4734 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4736 if (old_data < data_end) {
4737 btrfs_print_leaf(root, leaf);
4738 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4739 slot, old_data, data_end);
4743 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4745 /* first correct the data pointers */
4746 for (i = slot; i < nritems; i++) {
4749 item = btrfs_item_nr(i);
4750 ioff = btrfs_token_item_offset(leaf, item, &token);
4751 btrfs_set_token_item_offset(leaf, item,
4752 ioff - total_data, &token);
4754 /* shift the items */
4755 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4756 btrfs_item_nr_offset(slot),
4757 (nritems - slot) * sizeof(struct btrfs_item));
4759 /* shift the data */
4760 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4761 data_end - total_data, btrfs_leaf_data(leaf) +
4762 data_end, old_data - data_end);
4763 data_end = old_data;
4766 /* setup the item for the new data */
4767 for (i = 0; i < nr; i++) {
4768 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4769 btrfs_set_item_key(leaf, &disk_key, slot + i);
4770 item = btrfs_item_nr(slot + i);
4771 btrfs_set_token_item_offset(leaf, item,
4772 data_end - data_size[i], &token);
4773 data_end -= data_size[i];
4774 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4777 btrfs_set_header_nritems(leaf, nritems + nr);
4778 btrfs_mark_buffer_dirty(leaf);
4780 if (btrfs_leaf_free_space(root, leaf) < 0) {
4781 btrfs_print_leaf(root, leaf);
4787 * Given a key and some data, insert items into the tree.
4788 * This does all the path init required, making room in the tree if needed.
4790 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4791 struct btrfs_root *root,
4792 struct btrfs_path *path,
4793 struct btrfs_key *cpu_key, u32 *data_size,
4802 for (i = 0; i < nr; i++)
4803 total_data += data_size[i];
4805 total_size = total_data + (nr * sizeof(struct btrfs_item));
4806 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4812 slot = path->slots[0];
4815 setup_items_for_insert(root, path, cpu_key, data_size,
4816 total_data, total_size, nr);
4821 * Given a key and some data, insert an item into the tree.
4822 * This does all the path init required, making room in the tree if needed.
4824 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4825 *root, struct btrfs_key *cpu_key, void *data, u32
4829 struct btrfs_path *path;
4830 struct extent_buffer *leaf;
4833 path = btrfs_alloc_path();
4836 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4838 leaf = path->nodes[0];
4839 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4840 write_extent_buffer(leaf, data, ptr, data_size);
4841 btrfs_mark_buffer_dirty(leaf);
4843 btrfs_free_path(path);
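/*
 * Illustrative sketch, not part of the original file: inserting two items
 * with a single tree search and then filling in their data, which is what
 * btrfs_insert_empty_items() buys over the single item helper above.  The
 * keys must be sorted and not yet present in the tree; all values here are
 * assumed to come from the caller.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_key *keys,
						   void *data0, u32 size0,
						   void *data1, u32 size1)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 sizes[2] = { size0, size1 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret == 0) {
		leaf = path->nodes[0];
		write_extent_buffer(leaf, data0,
				    btrfs_item_ptr_offset(leaf, path->slots[0]),
				    size0);
		write_extent_buffer(leaf, data1,
				    btrfs_item_ptr_offset(leaf, path->slots[0] + 1),
				    size1);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}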
4848 * delete the pointer from a given node.
4850 * the tree should have been previously balanced so the deletion does not
4853 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4854 int level, int slot)
4856 struct extent_buffer *parent = path->nodes[level];
4860 nritems = btrfs_header_nritems(parent);
4861 if (slot != nritems - 1) {
4863 tree_mod_log_eb_move(root->fs_info, parent, slot,
4864 slot + 1, nritems - slot - 1);
4865 memmove_extent_buffer(parent,
4866 btrfs_node_key_ptr_offset(slot),
4867 btrfs_node_key_ptr_offset(slot + 1),
4868 sizeof(struct btrfs_key_ptr) *
4869 (nritems - slot - 1));
4871 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4872 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4877 btrfs_set_header_nritems(parent, nritems);
4878 if (nritems == 0 && parent == root->node) {
4879 BUG_ON(btrfs_header_level(root->node) != 1);
4880 /* just turn the root into a leaf and break */
4881 btrfs_set_header_level(root->node, 0);
4882 } else if (slot == 0) {
4883 struct btrfs_disk_key disk_key;
4885 btrfs_node_key(parent, &disk_key, 0);
4886 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4888 btrfs_mark_buffer_dirty(parent);
4892 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4895 * This deletes the pointer in path->nodes[1] and frees the leaf
4896 * block extent. zero is returned if it all worked out, < 0 otherwise.
4898 * The path must have already been set up for deleting the leaf, including
4899 * all the proper balancing. path->nodes[1] must be locked.
4901 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4902 struct btrfs_root *root,
4903 struct btrfs_path *path,
4904 struct extent_buffer *leaf)
4906 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4907 del_ptr(root, path, 1, path->slots[1]);
4910 * btrfs_free_extent is expensive, so we want to make sure we
4911 * aren't holding any locks when we call it
4913 btrfs_unlock_up_safe(path, 0);
4915 root_sub_used(root, leaf->len);
4917 extent_buffer_get(leaf);
4918 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4919 free_extent_buffer_stale(leaf);
4922 * delete the item at the leaf level in path. If that empties
4923 * the leaf, remove it from the tree
4925 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4926 struct btrfs_path *path, int slot, int nr)
4928 struct extent_buffer *leaf;
4929 struct btrfs_item *item;
4936 struct btrfs_map_token token;
4938 btrfs_init_map_token(&token);
4940 leaf = path->nodes[0];
4941 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4943 for (i = 0; i < nr; i++)
4944 dsize += btrfs_item_size_nr(leaf, slot + i);
4946 nritems = btrfs_header_nritems(leaf);
4948 if (slot + nr != nritems) {
4949 int data_end = leaf_data_end(root, leaf);
4951 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4953 btrfs_leaf_data(leaf) + data_end,
4954 last_off - data_end);
4956 for (i = slot + nr; i < nritems; i++) {
4959 item = btrfs_item_nr(i);
4960 ioff = btrfs_token_item_offset(leaf, item, &token);
4961 btrfs_set_token_item_offset(leaf, item,
4962 ioff + dsize, &token);
4965 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4966 btrfs_item_nr_offset(slot + nr),
4967 sizeof(struct btrfs_item) *
4968 (nritems - slot - nr));
4970 btrfs_set_header_nritems(leaf, nritems - nr);
4973 /* delete the leaf if we've emptied it */
4975 if (leaf == root->node) {
4976 btrfs_set_header_level(leaf, 0);
4978 btrfs_set_path_blocking(path);
4979 clean_tree_block(trans, root->fs_info, leaf);
4980 btrfs_del_leaf(trans, root, path, leaf);
4983 int used = leaf_space_used(leaf, 0, nritems);
4985 struct btrfs_disk_key disk_key;
4987 btrfs_item_key(leaf, &disk_key, 0);
4988 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4991 /* delete the leaf if it is mostly empty */
4992 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4993 /* push_leaf_left fixes the path.
4994 * make sure the path still points to our leaf
4995 * for possible call to del_ptr below
4997 slot = path->slots[1];
4998 extent_buffer_get(leaf);
5000 btrfs_set_path_blocking(path);
5001 wret = push_leaf_left(trans, root, path, 1, 1,
5003 if (wret < 0 && wret != -ENOSPC)
5006 if (path->nodes[0] == leaf &&
5007 btrfs_header_nritems(leaf)) {
5008 wret = push_leaf_right(trans, root, path, 1,
5010 if (wret < 0 && wret != -ENOSPC)
5014 if (btrfs_header_nritems(leaf) == 0) {
5015 path->slots[1] = slot;
5016 btrfs_del_leaf(trans, root, path, leaf);
5017 free_extent_buffer(leaf);
5020 /* if we're still in the path, make sure
5021 * we're dirty. Otherwise, one of the
5022 * push_leaf functions must have already
5023 * dirtied this buffer
5025 if (path->nodes[0] == leaf)
5026 btrfs_mark_buffer_dirty(leaf);
5027 free_extent_buffer(leaf);
5030 btrfs_mark_buffer_dirty(leaf);
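/*
 * Illustrative sketch, not part of ctree.c: the usual way callers drive
 * btrfs_del_items().  Searching with ins_len == -1 and cow == 1 asks
 * btrfs_search_slot() to do the balancing needed before a deletion, and
 * btrfs_del_items() then removes the item the path ends up pointing to.
 * The key is a hypothetical placeholder.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;	/* exact key not present */
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	btrfs_free_path(path);
	return ret;
}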
5037 * search the tree again to find a leaf with lesser keys
5038 * returns 0 if it found something or 1 if there are no lesser leaves.
5039 * returns < 0 on io errors.
5041 * This may release the path, and so you may lose any locks held at the time you call it.
5044 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5046 struct btrfs_key key;
5047 struct btrfs_disk_key found_key;
5050 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5052 if (key.offset > 0) {
5054 } else if (key.type > 0) {
5056 key.offset = (u64)-1;
5057 } else if (key.objectid > 0) {
5060 key.offset = (u64)-1;
5065 btrfs_release_path(path);
5066 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5069 btrfs_item_key(path->nodes[0], &found_key, 0);
5070 ret = comp_keys(&found_key, &key);
5072 * We might have had an item with the previous key in the tree right
5073 * before we released our path. And after we released our path, that
5074 * item might have been pushed to the first slot (0) of the leaf we
5075 * were holding due to a tree balance. Alternatively, an item with the
5076 * previous key can exist as the only element of a leaf (big fat item).
5077 * Therefore account for these 2 cases, so that our callers (like
5078 * btrfs_previous_item) don't miss an existing item with a key matching
5079 * the previous key we computed above.
5087 * A helper function to walk down the tree starting at min_key, and looking
5088 * for nodes or leaves that have a minimum transaction id.
5089 * This is used by the btree defrag code, and tree logging
5091 * This does not cow, but it does stuff the starting key it finds back
5092 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5093 * key and get a writable path.
5095 * This does lock as it descends, and path->keep_locks should be set
5096 * to 1 by the caller.
5098 * This honors path->lowest_level to prevent descent past a given level of the tree.
5101 * min_trans indicates the oldest transaction that you are interested
5102 * in walking through. Any nodes or leaves older than min_trans are
5103 * skipped over (without reading them).
5105 * returns zero if something useful was found, < 0 on error and 1 if there
5106 * was nothing in the tree that matched the search criteria.
5108 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5109 struct btrfs_path *path,
5112 struct extent_buffer *cur;
5113 struct btrfs_key found_key;
5119 int keep_locks = path->keep_locks;
5121 path->keep_locks = 1;
5123 cur = btrfs_read_lock_root_node(root);
5124 level = btrfs_header_level(cur);
5125 WARN_ON(path->nodes[level]);
5126 path->nodes[level] = cur;
5127 path->locks[level] = BTRFS_READ_LOCK;
5129 if (btrfs_header_generation(cur) < min_trans) {
5134 nritems = btrfs_header_nritems(cur);
5135 level = btrfs_header_level(cur);
5136 sret = bin_search(cur, min_key, level, &slot);
5138 /* at the lowest level, we're done, setup the path and exit */
5139 if (level == path->lowest_level) {
5140 if (slot >= nritems)
5143 path->slots[level] = slot;
5144 btrfs_item_key_to_cpu(cur, &found_key, slot);
5147 if (sret && slot > 0)
5150 * check this node pointer against the min_trans parameter.
5151 * If it is too old, skip to the next one.
5153 while (slot < nritems) {
5156 gen = btrfs_node_ptr_generation(cur, slot);
5157 if (gen < min_trans) {
5165 * we didn't find a candidate key in this node, walk forward
5166 * and find another one
5168 if (slot >= nritems) {
5169 path->slots[level] = slot;
5170 btrfs_set_path_blocking(path);
5171 sret = btrfs_find_next_key(root, path, min_key, level,
5174 btrfs_release_path(path);
5180 /* save our key for returning back */
5181 btrfs_node_key_to_cpu(cur, &found_key, slot);
5182 path->slots[level] = slot;
5183 if (level == path->lowest_level) {
5187 btrfs_set_path_blocking(path);
5188 cur = read_node_slot(root, cur, slot);
5189 BUG_ON(!cur); /* -ENOMEM */
5191 btrfs_tree_read_lock(cur);
5193 path->locks[level - 1] = BTRFS_READ_LOCK;
5194 path->nodes[level - 1] = cur;
5195 unlock_up(path, level, 1, 0, NULL);
5196 btrfs_clear_path_blocking(path, NULL, 0);
5199 path->keep_locks = keep_locks;
5201 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5202 btrfs_set_path_blocking(path);
5203 memcpy(min_key, &found_key, sizeof(found_key));
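/*
 * Illustrative sketch, not part of ctree.c: a caller scanning for items
 * that live in blocks newer than @min_trans, in the style of the defrag
 * and tree-log users of btrfs_search_forward().  Processing of the found
 * item is left as a comment; the advance of min_key past the found key is
 * the usual objectid/type/offset rollover.
 */
static int example_scan_newer_items(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing left to visit */
			break;
		}
		/* min_key now holds the key that was found and the path
		 * points at it: inspect path->nodes[0], path->slots[0] here.
		 */
		btrfs_release_path(path);

		/* step past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}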
5208 static void tree_move_down(struct btrfs_root *root,
5209 struct btrfs_path *path,
5210 int *level, int root_level)
5212 BUG_ON(*level == 0);
5213 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5214 path->slots[*level]);
5215 path->slots[*level - 1] = 0;
5219 static int tree_move_next_or_upnext(struct btrfs_root *root,
5220 struct btrfs_path *path,
5221 int *level, int root_level)
5225 nritems = btrfs_header_nritems(path->nodes[*level]);
5227 path->slots[*level]++;
5229 while (path->slots[*level] >= nritems) {
5230 if (*level == root_level)
5234 path->slots[*level] = 0;
5235 free_extent_buffer(path->nodes[*level]);
5236 path->nodes[*level] = NULL;
5238 path->slots[*level]++;
5240 nritems = btrfs_header_nritems(path->nodes[*level]);
5247 * Returns 1 if it had to move up and next. 0 is returned if it moved only next or down.
5250 static int tree_advance(struct btrfs_root *root,
5251 struct btrfs_path *path,
5252 int *level, int root_level,
5254 struct btrfs_key *key)
5258 if (*level == 0 || !allow_down) {
5259 ret = tree_move_next_or_upnext(root, path, level, root_level);
5261 tree_move_down(root, path, level, root_level);
5266 btrfs_item_key_to_cpu(path->nodes[*level], key,
5267 path->slots[*level]);
5269 btrfs_node_key_to_cpu(path->nodes[*level], key,
5270 path->slots[*level]);
5275 static int tree_compare_item(struct btrfs_root *left_root,
5276 struct btrfs_path *left_path,
5277 struct btrfs_path *right_path,
5282 unsigned long off1, off2;
5284 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5285 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5289 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5290 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5291 right_path->slots[0]);
5293 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5295 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5302 #define ADVANCE_ONLY_NEXT -1
5305 * This function compares two trees and calls the provided callback for
5306 * every changed/new/deleted item it finds.
5307 * If shared tree blocks are encountered, whole subtrees are skipped, making
5308 * the compare pretty fast on snapshotted subvolumes.
5310 * This currently works on commit roots only. As commit roots are read only,
5311 * we don't do any locking. The commit roots are protected with transactions.
5312 * Transactions are ended and rejoined when a commit is tried in between.
5314 * This function checks for modifications done to the trees while comparing.
5315 * If it detects a change, it aborts immediately.
5317 int btrfs_compare_trees(struct btrfs_root *left_root,
5318 struct btrfs_root *right_root,
5319 btrfs_changed_cb_t changed_cb, void *ctx)
5323 struct btrfs_path *left_path = NULL;
5324 struct btrfs_path *right_path = NULL;
5325 struct btrfs_key left_key;
5326 struct btrfs_key right_key;
5327 char *tmp_buf = NULL;
5328 int left_root_level;
5329 int right_root_level;
5332 int left_end_reached;
5333 int right_end_reached;
5341 left_path = btrfs_alloc_path();
5346 right_path = btrfs_alloc_path();
5352 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5358 left_path->search_commit_root = 1;
5359 left_path->skip_locking = 1;
5360 right_path->search_commit_root = 1;
5361 right_path->skip_locking = 1;
5364 * Strategy: Go to the first items of both trees. Then do
5366 * If both trees are at level 0
5367 * Compare keys of current items
5368 * If left < right treat left item as new, advance left tree and repeat
5370 * If left > right treat right item as deleted, advance right tree and repeat
5372 * If left == right do deep compare of items, treat as changed if
5373 * needed, advance both trees and repeat
5374 * If both trees are at the same level but not at level 0
5375 * Compare keys of current nodes/leafs
5376 * If left < right advance left tree and repeat
5377 * If left > right advance right tree and repeat
5378 * If left == right compare blockptrs of the next nodes/leafs
5379 * If they match advance both trees but stay at the same level and repeat
5381 * If they don't match advance both trees while allowing to go deeper and repeat
5383 * If tree levels are different
5384 * Advance the tree that needs it and repeat
5386 * Advancing a tree means:
5387 * If we are at level 0, try to go to the next slot. If that's not
5388 * possible, go one level up and repeat. Stop when we find a level
5389 * where we can go to the next slot. We may at this point be on a node or a leaf.
5392 * If we are not at level 0 and not on shared tree blocks, go one slot to the right if possible or go up and right.
5395 * If we are not at level 0 and on shared tree blocks, go one slot to
5396 * the right if possible or go up and right.
5399 down_read(&left_root->fs_info->commit_root_sem);
5400 left_level = btrfs_header_level(left_root->commit_root);
5401 left_root_level = left_level;
5402 left_path->nodes[left_level] = left_root->commit_root;
5403 extent_buffer_get(left_path->nodes[left_level]);
5405 right_level = btrfs_header_level(right_root->commit_root);
5406 right_root_level = right_level;
5407 right_path->nodes[right_level] = right_root->commit_root;
5408 extent_buffer_get(right_path->nodes[right_level]);
5409 up_read(&left_root->fs_info->commit_root_sem);
5411 if (left_level == 0)
5412 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5413 &left_key, left_path->slots[left_level]);
5415 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5416 &left_key, left_path->slots[left_level]);
5417 if (right_level == 0)
5418 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5419 &right_key, right_path->slots[right_level]);
5421 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5422 &right_key, right_path->slots[right_level]);
5424 left_end_reached = right_end_reached = 0;
5425 advance_left = advance_right = 0;
5429 if (advance_left && !left_end_reached) {
5430 ret = tree_advance(left_root, left_path, &left_level,
5432 advance_left != ADVANCE_ONLY_NEXT,
5435 left_end_reached = ADVANCE;
5438 if (advance_right && !right_end_reached) {
5439 ret = tree_advance(right_root, right_path, &right_level,
5441 advance_right != ADVANCE_ONLY_NEXT,
5444 right_end_reached = ADVANCE;
5448 if (left_end_reached && right_end_reached) {
5451 } else if (left_end_reached) {
5452 if (right_level == 0) {
5453 ret = changed_cb(left_root, right_root,
5454 left_path, right_path,
5456 BTRFS_COMPARE_TREE_DELETED,
5461 advance_right = ADVANCE;
5463 } else if (right_end_reached) {
5464 if (left_level == 0) {
5465 ret = changed_cb(left_root, right_root,
5466 left_path, right_path,
5468 BTRFS_COMPARE_TREE_NEW,
5473 advance_left = ADVANCE;
5477 if (left_level == 0 && right_level == 0) {
5478 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5480 ret = changed_cb(left_root, right_root,
5481 left_path, right_path,
5483 BTRFS_COMPARE_TREE_NEW,
5487 advance_left = ADVANCE;
5488 } else if (cmp > 0) {
5489 ret = changed_cb(left_root, right_root,
5490 left_path, right_path,
5492 BTRFS_COMPARE_TREE_DELETED,
5496 advance_right = ADVANCE;
5498 enum btrfs_compare_tree_result result;
5500 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5501 ret = tree_compare_item(left_root, left_path,
5502 right_path, tmp_buf);
5504 result = BTRFS_COMPARE_TREE_CHANGED;
5506 result = BTRFS_COMPARE_TREE_SAME;
5507 ret = changed_cb(left_root, right_root,
5508 left_path, right_path,
5509 &left_key, result, ctx);
5512 advance_left = ADVANCE;
5513 advance_right = ADVANCE;
5515 } else if (left_level == right_level) {
5516 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5518 advance_left = ADVANCE;
5519 } else if (cmp > 0) {
5520 advance_right = ADVANCE;
5522 left_blockptr = btrfs_node_blockptr(
5523 left_path->nodes[left_level],
5524 left_path->slots[left_level]);
5525 right_blockptr = btrfs_node_blockptr(
5526 right_path->nodes[right_level],
5527 right_path->slots[right_level]);
5528 left_gen = btrfs_node_ptr_generation(
5529 left_path->nodes[left_level],
5530 left_path->slots[left_level]);
5531 right_gen = btrfs_node_ptr_generation(
5532 right_path->nodes[right_level],
5533 right_path->slots[right_level]);
5534 if (left_blockptr == right_blockptr &&
5535 left_gen == right_gen) {
5537 * As we're on a shared block, don't
5538 * allow to go deeper.
5540 advance_left = ADVANCE_ONLY_NEXT;
5541 advance_right = ADVANCE_ONLY_NEXT;
5543 advance_left = ADVANCE;
5544 advance_right = ADVANCE;
5547 } else if (left_level < right_level) {
5548 advance_right = ADVANCE;
5550 advance_left = ADVANCE;
5555 btrfs_free_path(left_path);
5556 btrfs_free_path(right_path);
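/*
 * Illustrative sketch, not part of ctree.c: a minimal changed_cb and a
 * caller for btrfs_compare_trees(), assuming the btrfs_changed_cb_t
 * signature declared in ctree.h for this kernel.  example_diff_ctx,
 * example_changed_cb and example_diff_snapshots are hypothetical names;
 * send.c is the real in-tree user of this interface.
 */
struct example_diff_ctx {
	u64 changed;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_ctx *dctx = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		dctx->changed++;
	return 0;	/* a negative return aborts the compare */
}

static int example_diff_snapshots(struct btrfs_root *send_root,
				  struct btrfs_root *parent_root)
{
	struct example_diff_ctx dctx = { .changed = 0 };

	return btrfs_compare_trees(send_root, parent_root,
				   example_changed_cb, &dctx);
}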
5562 * this is similar to btrfs_next_leaf, but does not try to preserve
5563 * and fixup the path. It looks for and returns the next key in the
5564 * tree based on the current path and the min_trans parameters.
5566 * 0 is returned if another key is found, < 0 if there are any errors
5567 * and 1 is returned if there are no higher keys in the tree
5569 * path->keep_locks should be set to 1 on the search made before
5570 * calling this function.
5572 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5573 struct btrfs_key *key, int level, u64 min_trans)
5576 struct extent_buffer *c;
5578 WARN_ON(!path->keep_locks);
5579 while (level < BTRFS_MAX_LEVEL) {
5580 if (!path->nodes[level])
5583 slot = path->slots[level] + 1;
5584 c = path->nodes[level];
5586 if (slot >= btrfs_header_nritems(c)) {
5589 struct btrfs_key cur_key;
5590 if (level + 1 >= BTRFS_MAX_LEVEL ||
5591 !path->nodes[level + 1])
5594 if (path->locks[level + 1]) {
5599 slot = btrfs_header_nritems(c) - 1;
5601 btrfs_item_key_to_cpu(c, &cur_key, slot);
5603 btrfs_node_key_to_cpu(c, &cur_key, slot);
5605 orig_lowest = path->lowest_level;
5606 btrfs_release_path(path);
5607 path->lowest_level = level;
5608 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5610 path->lowest_level = orig_lowest;
5614 c = path->nodes[level];
5615 slot = path->slots[level];
5622 btrfs_item_key_to_cpu(c, key, slot);
5624 u64 gen = btrfs_node_ptr_generation(c, slot);
5626 if (gen < min_trans) {
5630 btrfs_node_key_to_cpu(c, key, slot);
5638 * search the tree again to find a leaf with greater keys
5639 * returns 0 if it found something or 1 if there are no greater leaves.
5640 * returns < 0 on io errors.
5642 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5644 return btrfs_next_old_leaf(root, path, 0);
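/*
 * Illustrative sketch, not part of ctree.c: the canonical "walk every item
 * of a tree" loop built on btrfs_search_slot() and btrfs_next_leaf().
 * Whenever the current leaf is exhausted, btrfs_next_leaf() re-searches
 * and moves the path to the following leaf.  Item processing is left as a
 * comment.
 */
static int example_walk_whole_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
	struct btrfs_key found;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* ... inspect the item described by 'found' here ... */
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}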
5647 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5652 struct extent_buffer *c;
5653 struct extent_buffer *next;
5654 struct btrfs_key key;
5657 int old_spinning = path->leave_spinning;
5658 int next_rw_lock = 0;
5660 nritems = btrfs_header_nritems(path->nodes[0]);
5664 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5669 btrfs_release_path(path);
5671 path->keep_locks = 1;
5672 path->leave_spinning = 1;
5675 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5677 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5678 path->keep_locks = 0;
5683 nritems = btrfs_header_nritems(path->nodes[0]);
5685 * by releasing the path above we dropped all our locks. A balance
5686 * could have added more items next to the key that used to be
5687 * at the very end of the block. So, check again here and
5688 * advance the path if there are now more items available.
5690 if (nritems > 0 && path->slots[0] < nritems - 1) {
5697 * So the above check misses one case:
5698 * - after releasing the path above, someone has removed the item that
5699 * used to be at the very end of the block, and balance between leafs
5700 * gets another one with bigger key.offset to replace it.
5702 * This one should be returned as well, or we can get leaf corruption
5703 * later (esp. in __btrfs_drop_extents()).
5705 * And a bit more explanation about this check,
5706 * with ret > 0, the key isn't found, the path points to the slot
5707 * where it should be inserted, so the path->slots[0] item must be the bigger one.
5710 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5715 while (level < BTRFS_MAX_LEVEL) {
5716 if (!path->nodes[level]) {
5721 slot = path->slots[level] + 1;
5722 c = path->nodes[level];
5723 if (slot >= btrfs_header_nritems(c)) {
5725 if (level == BTRFS_MAX_LEVEL) {
5733 btrfs_tree_unlock_rw(next, next_rw_lock);
5734 free_extent_buffer(next);
5738 next_rw_lock = path->locks[level];
5739 ret = read_block_for_search(NULL, root, path, &next, level,
5745 btrfs_release_path(path);
5749 if (!path->skip_locking) {
5750 ret = btrfs_try_tree_read_lock(next);
5751 if (!ret && time_seq) {
5753 * If we don't get the lock, we may be racing
5754 * with push_leaf_left, holding that lock while
5755 * itself waiting for the leaf we've currently
5756 * locked. To solve this situation, we give up
5757 * on our lock and cycle.
5759 free_extent_buffer(next);
5760 btrfs_release_path(path);
5765 btrfs_set_path_blocking(path);
5766 btrfs_tree_read_lock(next);
5767 btrfs_clear_path_blocking(path, next,
5770 next_rw_lock = BTRFS_READ_LOCK;
5774 path->slots[level] = slot;
5777 c = path->nodes[level];
5778 if (path->locks[level])
5779 btrfs_tree_unlock_rw(c, path->locks[level]);
5781 free_extent_buffer(c);
5782 path->nodes[level] = next;
5783 path->slots[level] = 0;
5784 if (!path->skip_locking)
5785 path->locks[level] = next_rw_lock;
5789 ret = read_block_for_search(NULL, root, path, &next, level,
5795 btrfs_release_path(path);
5799 if (!path->skip_locking) {
5800 ret = btrfs_try_tree_read_lock(next);
5802 btrfs_set_path_blocking(path);
5803 btrfs_tree_read_lock(next);
5804 btrfs_clear_path_blocking(path, next,
5807 next_rw_lock = BTRFS_READ_LOCK;
5812 unlock_up(path, 0, 1, 0, NULL);
5813 path->leave_spinning = old_spinning;
5815 btrfs_set_path_blocking(path);
5821 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5822 * searching until it gets past min_objectid or finds an item of 'type'
5824 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5826 int btrfs_previous_item(struct btrfs_root *root,
5827 struct btrfs_path *path, u64 min_objectid,
5830 struct btrfs_key found_key;
5831 struct extent_buffer *leaf;
5836 if (path->slots[0] == 0) {
5837 btrfs_set_path_blocking(path);
5838 ret = btrfs_prev_leaf(root, path);
5844 leaf = path->nodes[0];
5845 nritems = btrfs_header_nritems(leaf);
5848 if (path->slots[0] == nritems)
5851 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5852 if (found_key.objectid < min_objectid)
5854 if (found_key.type == type)
5856 if (found_key.objectid == min_objectid &&
5857 found_key.type < type)
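/*
 * Illustrative sketch, not part of ctree.c: finding the last item of a
 * given objectid and type by searching just past it and stepping back
 * with btrfs_previous_item().  The key values are hypothetical
 * placeholders.
 */
static int example_find_last_item(struct btrfs_root *root, u64 objectid,
				  int type, struct btrfs_key *found)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* nothing at offset (u64)-1, step back to the previous item */
		ret = btrfs_previous_item(root, path, objectid, type);
	}
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}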
5864 * search in extent tree to find a previous Metadata/Data extent item with min objectid.
5867 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5869 int btrfs_previous_extent_item(struct btrfs_root *root,
5870 struct btrfs_path *path, u64 min_objectid)
5872 struct btrfs_key found_key;
5873 struct extent_buffer *leaf;
5878 if (path->slots[0] == 0) {
5879 btrfs_set_path_blocking(path);
5880 ret = btrfs_prev_leaf(root, path);
5886 leaf = path->nodes[0];
5887 nritems = btrfs_header_nritems(leaf);
5890 if (path->slots[0] == nritems)
5893 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5894 if (found_key.objectid < min_objectid)
5896 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5897 found_key.type == BTRFS_METADATA_ITEM_KEY)
5899 if (found_key.objectid == min_objectid &&
5900 found_key.type < BTRFS_EXTENT_ITEM_KEY)