// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};
int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}
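
/*
 * Editor's illustrative sketch, not part of the original file: how the
 * helpers above are typically consulted once the superblock's csum type has
 * been validated at mount time. Kept out of the build with #if 0 on purpose.
 */
#if 0
static void example_csum_params(const struct btrfs_super_block *sb)
{
	u16 csum_type = btrfs_super_csum_type(sb);

	/* name is user-visible, driver is the crypto API algorithm name */
	pr_info("csum %s (driver %s), %d bytes\n",
		btrfs_super_csum_name(csum_type),
		btrfs_super_csum_driver(csum_type),
		btrfs_super_csum_size(sb));
}
#endif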
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * We want the transaction abort to print a stack trace only for errors where
 * the cause could be a bug, e.g. due to ENOSPC, and not for common errors that
 * are caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
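
/*
 * Editor's illustrative sketch, not part of the original file: the general
 * shape of the "atomic_inc_not_zero() under RCU" pattern used above, with a
 * hypothetical object type. The function above retries after
 * synchronize_rcu() instead of returning NULL. Kept out of the build with
 * #if 0 on purpose.
 */
#if 0
struct example_obj {
	atomic_t refs;
};

static struct example_obj *example_get_live(struct example_obj __rcu **slot)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(*slot);
	/* Only take a reference if the refcount hasn't already hit zero. */
	if (obj && !atomic_inc_not_zero(&obj->refs))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}
#endif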
/*
 * Cowonly roots (not-shareable trees, everything not subvolume or reloc roots)
 * just get put onto a simple dirty list.  Transaction walks this list to make
 * sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}
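
/*
 * Editor's illustrative sketch, not part of the original file: a simplified
 * restatement of the unconditional full-backref triggers from the rules
 * above. The third rule (a shared block disowned by its owner tree) needs an
 * extent-tree lookup and is handled in update_ref_for_cow() itself. Kept out
 * of the build with #if 0 on purpose.
 */
#if 0
static bool example_needs_full_backref(struct btrfs_root *root,
				       struct extent_buffer *buf)
{
	/* block belongs to the relocation tree */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return true;
	/* block predates the mixed backref format */
	if (btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return true;
	return false;
}
#endif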
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_tree_mod_log_insert_key(parent, parent_slot,
					      BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
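
/*
 * Editor's illustrative sketch, not part of the original file: the COW-skip
 * conditions above, restated as named booleans for readability. Kept out of
 * the build with #if 0 on purpose.
 */
#if 0
static bool example_can_skip_cow(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	bool created_in_this_trans =
		btrfs_header_generation(buf) == trans->transid;
	bool already_written =
		btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN);
	bool reloc_block_outside_reloc_tree =
		btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC) &&
		root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID;
	bool force_cow = test_bit(BTRFS_ROOT_FORCE_COW, &root->state);

	return created_in_this_trans && !already_written &&
	       !reloc_block_outside_reloc_tree && !force_cow;
}
#endif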
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
		   "attempt to COW block %llu on root %llu that is being deleted",
			   buf->start, btrfs_root_id(root));
		return -EUCLEAN;
	}

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
			   buf->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also, we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
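
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * caller pattern for btrfs_cow_block(). Within a running transaction, COW a
 * block before modifying it; on success the out pointer refers to a writable,
 * locked, dirty copy (possibly the same buffer if no COW was needed). The
 * function and level arithmetic here are hypothetical. Kept out of the build
 * with #if 0 on purpose.
 */
#if 0
static int example_modify_block(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	struct extent_buffer *eb = path->nodes[level];
	int ret;

	ret = btrfs_cow_block(trans, root, eb, path->nodes[level + 1],
			      path->slots[level + 1], &eb,
			      BTRFS_NESTING_NORMAL);
	if (ret)
		return ret;
	/* ... modify 'eb' here, then mark it dirty again if needed ... */
	return 0;
}
#endif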
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
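
/*
 * Editor's illustrative sketch, not part of the original file: with a 16K
 * nodesize, blocks at 1M and 1M+20K are "close" (a 4K gap, under the 32K
 * threshold), while blocks at 1M and 1M+64K are not (a 48K gap), so defrag
 * would relocate the latter. Kept out of the build with #if 0 on purpose.
 */
#if 0
static void example_close_blocks(void)
{
	WARN_ON(close_blocks(SZ_1M, SZ_1M + 20 * SZ_1K, SZ_16K) != 1);
	WARN_ON(close_blocks(SZ_1M, SZ_1M + 64 * SZ_1K, SZ_16K) != 0);
}
#endif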
#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif
/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
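
/*
 * Editor's illustrative sketch, not part of the original file: keys sort by
 * (objectid, type, offset), in that order. Since BTRFS_INODE_ITEM_KEY (1)
 * sorts before BTRFS_DIR_ITEM_KEY (84), two keys with the same objectid are
 * ordered by type. Kept out of the build with #if 0 on purpose.
 */
#if 0
static void example_key_order(void)
{
	const struct btrfs_key a = { .objectid = 256,
				     .type = BTRFS_INODE_ITEM_KEY,
				     .offset = 0 };
	const struct btrfs_key b = { .objectid = 256,
				     .type = BTRFS_DIR_ITEM_KEY,
				     .offset = 123 };

	/* same objectid, so the comparison falls through to the type */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) >= 0);
}
#endif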
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
			   parent->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @low. Use a
 * value of 0 to search over the whole extent buffer.
 *
 * The slot in the extent buffer is returned via @slot. If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
static noinline int generic_bin_search(struct extent_buffer *eb, int low,
				       const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	int high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * Simple binary search on an extent buffer. Works for both leaves and nodes, and
 * always searches over the whole range of keys (slot 0 to slot 'nritems - 1').
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	return generic_bin_search(eb, 0, key, slot);
}
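
/*
 * Editor's illustrative sketch, not part of the original file: the
 * btrfs_bin_search() contract. It returns 0 with *slot at the match, or 1
 * with *slot at the insertion position, which may equal nritems when the key
 * sorts past the last key in the buffer. Kept out of the build with #if 0 on
 * purpose.
 */
#if 0
static void example_bin_search(struct extent_buffer *leaf)
{
	struct btrfs_key key = { .objectid = 256,
				 .type = BTRFS_INODE_ITEM_KEY,
				 .offset = 0 };
	int slot;
	int ret;

	ret = btrfs_bin_search(leaf, &key, &slot);
	if (ret == 0) {
		/* exact match: the item lives at 'slot' */
	} else {
		/* not present: 'slot' is where the key would be inserted */
	}
}
#endif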
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_header_owner(parent),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto enospc;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clean_tree_block(right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto enospc;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
				BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto enospc;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}
static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree. The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	struct btrfs_key first_key;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search. We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, gen, parent_level - 1, &first_key);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
			      gen, parent_level - 1, &first_key);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}
/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version. The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}
static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to. Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return generic_bin_search(eb, search_low_slot, key, slot);
}
static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is greater than or equal to the
				 * key we want to insert, so we can skip the
				 * binary search as the target key will be at
				 * slot 0.
				 *
				 * We can not unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists. In this case, if we are allowed to
		 * insert the item (for example, in dir_item case, item key
		 * collision is allowed), it will be merged with the original
		 * item. Only the item size grows, no new btrfs item will be
		 * added. If search_for_extension is not set, ins_len already
		 * accounts for the size of struct btrfs_item, so deduct it here
		 * so the leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}
/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search:
 *              >0  for inserts it's the size of the item inserted (*)
 *              <0  for deletions
 *               0  for plain searches, not modifying the tree
 *
 *              (*) If the size of the item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean should CoW operations be performed. Must always be 1
 *		when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	/*
	 * For now only allow nowait for read only operations.  There's no
	 * strict reason why we can't, we just only need it for reads so it's
	 * only implemented for reads.
	 */
	ASSERT(!p->nowait || !cow);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

	if (p->need_commit_sem) {
		ASSERT(p->search_commit_root);
		if (p->nowait) {
			if (!down_read_trylock(&fs_info->commit_root_sem))
				return -EAGAIN;
		} else {
			down_read(&fs_info->commit_root_sem);
		}
	}

again:
	prev_cmp = -1;
	b = btrfs_search_slot_get_root(root, p, write_lock_level);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto done;
	}

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);

		if (cow) {
			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			if (last_level)
				err = btrfs_cow_block(trans, root, b, NULL, 0,
						      &b,
						      BTRFS_NESTING_COW);
			else
				err = btrfs_cow_block(trans, root, b,
						      p->nodes[level + 1],
						      p->slots[level + 1], &b,
						      BTRFS_NESTING_COW);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			int u = level + 1;

			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
				p->locks[u] = 0;
			}
		}

		if (level == 0) {
			if (ins_len > 0)
				ASSERT(write_lock_level >= 1);

			ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, NULL);
			goto done;
		}

		ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
		if (ret < 0)
			goto done;
		prev_cmp = ret;

		if (ret && slot > 0) {
			dec = 1;
			slot--;
		}
		p->slots[level] = slot;
		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
					     &write_lock_level);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}
		b = p->nodes[level];
		slot = p->slots[level];

		/*
		 * Slot 0 is special, if we change the key we have to update
		 * the parent pointer which means we must have a write lock on
		 * the parent
		 */
		if (slot == 0 && ins_len && write_lock_level < level + 1) {
			write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		unlock_up(p, level, lowest_unlock, min_write_lock_level,
			  &write_lock_level);

		if (level == lowest_level) {
			if (dec)
				p->slots[level]++;
			goto done;
		}

		err = read_block_for_search(root, p, &b, level, slot, key);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}

		if (!p->skip_locking) {
			level = btrfs_header_level(b);

			btrfs_maybe_reset_lockdep_class(root, b);

			if (level <= write_lock_level) {
				btrfs_tree_lock(b);
				p->locks[level] = BTRFS_WRITE_LOCK;
			} else {
				if (p->nowait) {
					if (!btrfs_try_tree_read_lock(b)) {
						free_extent_buffer(b);
						ret = -EAGAIN;
						goto done;
					}
				} else {
					btrfs_tree_read_lock(b);
				}
				p->locks[level] = BTRFS_READ_LOCK;
			}
			p->nodes[level] = b;
		}
	}
	ret = 1;
done:
	if (ret < 0 && !p->skip_release_on_error)
		btrfs_release_path(p);

	if (p->need_commit_sem) {
		int ret2;

		ret2 = finish_need_commit_sem_search(p);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
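
/*
 * Editor's illustrative sketch, not part of the original file: the canonical
 * read-only lookup pattern built on btrfs_search_slot(). The function name
 * is hypothetical. Kept out of the build with #if 0 on purpose.
 */
#if 0
static int example_lookup(struct btrfs_root *root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key key = { .objectid = ino,
				 .type = BTRFS_INODE_ITEM_KEY,
				 .offset = 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len == 0 and cow == 0: plain search, no tree modification */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/*
	 * ret == 0: key found at path->nodes[0], path->slots[0]
	 * ret == 1: not found; the slot is the would-be insert position
	 * ret < 0: error
	 */
	btrfs_free_path(path);
	return ret;
}
#endif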
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);
	ASSERT(!p->nowait);

	if (p->search_commit_root) {
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = btrfs_get_old_root(root, time_seq);
	if (!b) {
		ret = -EIO;
		goto done;
	}
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);
		p->nodes[level] = b;

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = btrfs_bin_search(b, key, &slot);
		if (ret < 0)
			goto done;

		if (level == 0) {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}

		if (ret && slot > 0) {
			dec = 1;
			slot--;
		}
		p->slots[level] = slot;
		unlock_up(p, level, lowest_unlock, 0, NULL);

		if (level == lowest_level) {
			if (dec)
				p->slots[level]++;
			goto done;
		}

		err = read_block_for_search(root, p, &b, level, slot, key);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}

		level = btrfs_header_level(b);
		btrfs_tree_read_lock(b);
		b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
		if (!b) {
			ret = -ENOMEM;
			goto done;
		}
		p->locks[level] = BTRFS_READ_LOCK;
		p->nodes[level] = b;
	}
	ret = 1;
done:
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (ret == 0) {
				leaf = p->nodes[0];
				if (p->slots[0] == btrfs_header_nritems(leaf))
					p->slots[0]--;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}
/*
 * Execute search and call btrfs_previous_item to traverse backwards if the item
 * was not found.
 *
 * Return 0 if found, 1 if not found and < 0 if error.
 */
int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0)
		ret = btrfs_previous_item(root, path, key->objectid, key->type);

	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);

	return ret;
}
/*
 * Search for a valid slot for the given path.
 *
 * @root:	The root node of the tree.
 * @key:	Will contain a valid item if found.
 * @path:	The starting point to validate the slot.
 *
 * Return: 0  if the item is valid
 *         1  if not found
 *         <0 if error.
 */
int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path)
{
	while (1) {
		int ret;
		const int slot = path->slots[0];
		const struct extent_buffer *leaf = path->nodes[0];

		/* This is where we start walking the path. */
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If we've reached the last slot in this leaf we need
			 * to go to the next leaf and reset the path.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret)
				return ret;
			continue;
		}
		/* Store the found, valid item in @key. */
		btrfs_item_key_to_cpu(leaf, key, slot);
		break;
	}
	return 0;
}
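
/*
 * Editor's illustrative sketch, not part of the original file: iterating all
 * items of a tree starting at 'first', advancing slot by slot and crossing
 * leaf boundaries via btrfs_get_next_valid_item(). The function name is
 * hypothetical. Kept out of the build with #if 0 on purpose.
 */
#if 0
static int example_iterate(struct btrfs_root *root,
			   const struct btrfs_key *first)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first, path, 0, 0);
	if (ret < 0)
		goto out;

	while ((ret = btrfs_get_next_valid_item(root, &found, path)) == 0) {
		/* ... process the item at path->nodes[0], path->slots[0] ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;	/* walked off the end of the tree */
out:
	btrfs_free_path(path);
	return ret;
}
#endif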
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;
	int ret;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];

		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		ret = btrfs_tree_mod_log_insert_key(t, tslot,
				BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
		BUG_ON(ret < 0);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
2473 * This function isn't completely safe. It's the caller's responsibility
2474 * that the new key won't break the order
2476 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2477 struct btrfs_path *path,
2478 const struct btrfs_key *new_key)
2480 struct btrfs_disk_key disk_key;
2481 struct extent_buffer *eb;
2484 eb = path->nodes[0];
2485 slot = path->slots[0];
2487 btrfs_item_key(eb, &disk_key, slot - 1);
2488 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2490 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2491 slot, btrfs_disk_key_objectid(&disk_key),
2492 btrfs_disk_key_type(&disk_key),
2493 btrfs_disk_key_offset(&disk_key),
2494 new_key->objectid, new_key->type,
2496 btrfs_print_leaf(eb);
2500 if (slot < btrfs_header_nritems(eb) - 1) {
2501 btrfs_item_key(eb, &disk_key, slot + 1);
2502 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2504 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2505 slot, btrfs_disk_key_objectid(&disk_key),
2506 btrfs_disk_key_type(&disk_key),
2507 btrfs_disk_key_offset(&disk_key),
2508 new_key->objectid, new_key->type,
2510 btrfs_print_leaf(eb);
2515 btrfs_cpu_key_to_disk(&disk_key, new_key);
2516 btrfs_set_item_key(eb, &disk_key, slot);
2517 btrfs_mark_buffer_dirty(eb);
2519 fixup_low_keys(path, &disk_key, 1);
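/*
 * Illustrative sketch (hypothetical, not in the original file): rewrite only
 * the offset of the key at path->slots[0]. This stays within the ordering
 * contract checked above as long as the new offset keeps the key strictly
 * between its neighbours.
 */
static void __maybe_unused example_change_key_offset(struct btrfs_fs_info *fs_info,
                                                     struct btrfs_path *path,
                                                     u64 new_offset)
{
        struct btrfs_key key;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        key.offset = new_offset;
        btrfs_set_item_key_safe(fs_info, path, &key);
}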
2523 * Check key order of two sibling extent buffers.
2525 * Return true if something is wrong.
2526 * Return false if everything is fine.
2528 * Tree-checker only works inside one tree block, thus the following
2529 * corruption can not be detected by tree-checker:
2531 * Leaf @left | Leaf @right
2532 * --------------------------------------------------------------
2533 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2535 * Key f6 in leaf @left itself is valid, but not valid when the next
2536 * key in leaf @right is 7.
2537 * This can only be checked at tree block merge time.
2538 * And since tree checker has ensured all key order in each tree block
2539 * is correct, we only need to check the last key of @left and the first key of @right.
2542 static bool check_sibling_keys(struct extent_buffer *left,
2543 struct extent_buffer *right)
2545 struct btrfs_key left_last;
2546 struct btrfs_key right_first;
2547 int level = btrfs_header_level(left);
2548 int nr_left = btrfs_header_nritems(left);
2549 int nr_right = btrfs_header_nritems(right);
2551 /* No key to check in one of the tree blocks */
2552 if (!nr_left || !nr_right)
2556 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2557 btrfs_node_key_to_cpu(right, &right_first, 0);
2559 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2560 btrfs_item_key_to_cpu(right, &right_first, 0);
2563 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2564 btrfs_crit(left->fs_info,
2565 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2566 left_last.objectid, left_last.type,
2567 left_last.offset, right_first.objectid,
2568 right_first.type, right_first.offset);
2575 * try to push data from one node into the next node left in the tree.
2578 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2579 * error, and > 0 if there was no room in the left hand block.
2581 static int push_node_left(struct btrfs_trans_handle *trans,
2582 struct extent_buffer *dst,
2583 struct extent_buffer *src, int empty)
2585 struct btrfs_fs_info *fs_info = trans->fs_info;
2591 src_nritems = btrfs_header_nritems(src);
2592 dst_nritems = btrfs_header_nritems(dst);
2593 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2594 WARN_ON(btrfs_header_generation(src) != trans->transid);
2595 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2597 if (!empty && src_nritems <= 8)
2600 if (push_items <= 0)
2604 push_items = min(src_nritems, push_items);
2605 if (push_items < src_nritems) {
2606 /* leave at least 8 pointers in the node if
2607 * we aren't going to empty it
2609 if (src_nritems - push_items < 8) {
2610 if (push_items <= 8)
2616 push_items = min(src_nritems - 8, push_items);
2618 /* dst is the left eb, src is the middle eb */
2619 if (check_sibling_keys(dst, src)) {
2621 btrfs_abort_transaction(trans, ret);
2624 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2626 btrfs_abort_transaction(trans, ret);
2629 copy_extent_buffer(dst, src,
2630 btrfs_node_key_ptr_offset(dst_nritems),
2631 btrfs_node_key_ptr_offset(0),
2632 push_items * sizeof(struct btrfs_key_ptr));
2634 if (push_items < src_nritems) {
2636 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2637 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2639 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2640 btrfs_node_key_ptr_offset(push_items),
2641 (src_nritems - push_items) *
2642 sizeof(struct btrfs_key_ptr));
2644 btrfs_set_header_nritems(src, src_nritems - push_items);
2645 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2646 btrfs_mark_buffer_dirty(src);
2647 btrfs_mark_buffer_dirty(dst);
2653 * try to push data from one node into the next node right in the tree.
2656 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2657 * error, and > 0 if there was no room in the right hand block.
2659 * this will only push up to 1/2 the contents of the left node over
2661 static int balance_node_right(struct btrfs_trans_handle *trans,
2662 struct extent_buffer *dst,
2663 struct extent_buffer *src)
2665 struct btrfs_fs_info *fs_info = trans->fs_info;
2672 WARN_ON(btrfs_header_generation(src) != trans->transid);
2673 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2675 src_nritems = btrfs_header_nritems(src);
2676 dst_nritems = btrfs_header_nritems(dst);
2677 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2678 if (push_items <= 0)
2681 if (src_nritems < 4)
2684 max_push = src_nritems / 2 + 1;
2685 /* don't try to empty the node */
2686 if (max_push >= src_nritems)
2689 if (max_push < push_items)
2690 push_items = max_push;
2692 /* dst is the right eb, src is the middle eb */
2693 if (check_sibling_keys(src, dst)) {
2695 btrfs_abort_transaction(trans, ret);
2698 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2700 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2701 btrfs_node_key_ptr_offset(0),
2703 sizeof(struct btrfs_key_ptr));
2705 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2708 btrfs_abort_transaction(trans, ret);
2711 copy_extent_buffer(dst, src,
2712 btrfs_node_key_ptr_offset(0),
2713 btrfs_node_key_ptr_offset(src_nritems - push_items),
2714 push_items * sizeof(struct btrfs_key_ptr));
2716 btrfs_set_header_nritems(src, src_nritems - push_items);
2717 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2719 btrfs_mark_buffer_dirty(src);
2720 btrfs_mark_buffer_dirty(dst);
2726 * helper function to insert a new root level in the tree.
2727 * A new node is allocated, and a single item is inserted to
2728 * point to the existing root
2730 * returns zero on success or < 0 on failure.
2732 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2733 struct btrfs_root *root,
2734 struct btrfs_path *path, int level)
2736 struct btrfs_fs_info *fs_info = root->fs_info;
2738 struct extent_buffer *lower;
2739 struct extent_buffer *c;
2740 struct extent_buffer *old;
2741 struct btrfs_disk_key lower_key;
2744 BUG_ON(path->nodes[level]);
2745 BUG_ON(path->nodes[level-1] != root->node);
2747 lower = path->nodes[level-1];
2749 btrfs_item_key(lower, &lower_key, 0);
2751 btrfs_node_key(lower, &lower_key, 0);
2753 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2754 &lower_key, level, root->node->start, 0,
2755 BTRFS_NESTING_NEW_ROOT);
2759 root_add_used(root, fs_info->nodesize);
2761 btrfs_set_header_nritems(c, 1);
2762 btrfs_set_node_key(c, &lower_key, 0);
2763 btrfs_set_node_blockptr(c, 0, lower->start);
2764 lower_gen = btrfs_header_generation(lower);
2765 WARN_ON(lower_gen != trans->transid);
2767 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2769 btrfs_mark_buffer_dirty(c);
2772 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2774 rcu_assign_pointer(root->node, c);
2776 /* the super has an extra ref to root->node */
2777 free_extent_buffer(old);
2779 add_root_to_dirty_list(root);
2780 atomic_inc(&c->refs);
2781 path->nodes[level] = c;
2782 path->locks[level] = BTRFS_WRITE_LOCK;
2783 path->slots[level] = 0;
2788 * worker function to insert a single pointer in a node.
2789 * the node should have enough room for the pointer already
2791 * slot and level indicate where you want the key to go, and
2792 * blocknr is the block the key points to.
2794 static void insert_ptr(struct btrfs_trans_handle *trans,
2795 struct btrfs_path *path,
2796 struct btrfs_disk_key *key, u64 bytenr,
2797 int slot, int level)
2799 struct extent_buffer *lower;
2803 BUG_ON(!path->nodes[level]);
2804 btrfs_assert_tree_write_locked(path->nodes[level]);
2805 lower = path->nodes[level];
2806 nritems = btrfs_header_nritems(lower);
2807 BUG_ON(slot > nritems);
2808 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2809 if (slot != nritems) {
2811 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2812 slot, nritems - slot);
2815 memmove_extent_buffer(lower,
2816 btrfs_node_key_ptr_offset(slot + 1),
2817 btrfs_node_key_ptr_offset(slot),
2818 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2821 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2822 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2825 btrfs_set_node_key(lower, key, slot);
2826 btrfs_set_node_blockptr(lower, slot, bytenr);
2827 WARN_ON(trans->transid == 0);
2828 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2829 btrfs_set_header_nritems(lower, nritems + 1);
2830 btrfs_mark_buffer_dirty(lower);
2834 * split the node at the specified level in path in two.
2835 * The path is corrected to point to the appropriate node after the split
2837 * Before splitting this tries to make some room in the node by pushing
2838 * left and right, if either one works, it returns right away.
2840 * returns 0 on success and < 0 on failure
2842 static noinline int split_node(struct btrfs_trans_handle *trans,
2843 struct btrfs_root *root,
2844 struct btrfs_path *path, int level)
2846 struct btrfs_fs_info *fs_info = root->fs_info;
2847 struct extent_buffer *c;
2848 struct extent_buffer *split;
2849 struct btrfs_disk_key disk_key;
2854 c = path->nodes[level];
2855 WARN_ON(btrfs_header_generation(c) != trans->transid);
2856 if (c == root->node) {
2858 * trying to split the root, let's make a new one
2860 * tree mod log: We don't log removal of the old root in
2861 * insert_new_root, because that root buffer will be kept as a
2862 * normal node. We are going to log removal of half of the
2863 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2864 * holding a tree lock on the buffer, which is why we cannot
2865 * race with other tree_mod_log users.
2867 ret = insert_new_root(trans, root, path, level + 1);
2871 ret = push_nodes_for_insert(trans, root, path, level);
2872 c = path->nodes[level];
2873 if (!ret && btrfs_header_nritems(c) <
2874 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2880 c_nritems = btrfs_header_nritems(c);
2881 mid = (c_nritems + 1) / 2;
2882 btrfs_node_key(c, &disk_key, mid);
2884 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2885 &disk_key, level, c->start, 0,
2886 BTRFS_NESTING_SPLIT);
2888 return PTR_ERR(split);
2890 root_add_used(root, fs_info->nodesize);
2891 ASSERT(btrfs_header_level(c) == level);
2893 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2895 btrfs_tree_unlock(split);
2896 free_extent_buffer(split);
2897 btrfs_abort_transaction(trans, ret);
2900 copy_extent_buffer(split, c,
2901 btrfs_node_key_ptr_offset(0),
2902 btrfs_node_key_ptr_offset(mid),
2903 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2904 btrfs_set_header_nritems(split, c_nritems - mid);
2905 btrfs_set_header_nritems(c, mid);
2907 btrfs_mark_buffer_dirty(c);
2908 btrfs_mark_buffer_dirty(split);
2910 insert_ptr(trans, path, &disk_key, split->start,
2911 path->slots[level + 1] + 1, level + 1);
2913 if (path->slots[level] >= mid) {
2914 path->slots[level] -= mid;
2915 btrfs_tree_unlock(c);
2916 free_extent_buffer(c);
2917 path->nodes[level] = split;
2918 path->slots[level + 1] += 1;
2920 btrfs_tree_unlock(split);
2921 free_extent_buffer(split);
2927 * how many bytes are required to store the items in a leaf. start
2928 * and nr indicate which items in the leaf to check. This totals up the
2929 * space used both by the item structs and the item data
2931 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2934 int nritems = btrfs_header_nritems(l);
2935 int end = min(nritems, start + nr) - 1;
2939 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
2940 data_len = data_len - btrfs_item_offset(l, end);
2941 data_len += sizeof(struct btrfs_item) * nr;
2942 WARN_ON(data_len < 0);
2947 * The space between the end of the leaf items and
2948 * the start of the leaf data. IOW, how much room
2949 * the leaf has left for both items and data
2951 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2953 struct btrfs_fs_info *fs_info = leaf->fs_info;
2954 int nritems = btrfs_header_nritems(leaf);
2957 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2960 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2962 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2963 leaf_space_used(leaf, 0, nritems), nritems);
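/*
 * Illustrative sketch (hypothetical, not in the original file): would @nr new
 * items with payload sizes @sizes fit into @leaf? Each item costs its payload
 * plus one struct btrfs_item header, which is exactly the accounting done by
 * leaf_space_used() and btrfs_leaf_free_space() above.
 */
static bool __maybe_unused example_items_fit(struct extent_buffer *leaf,
                                             const u32 *sizes, int nr)
{
        int needed = 0;
        int i;

        for (i = 0; i < nr; i++)
                needed += sizes[i] + sizeof(struct btrfs_item);

        return btrfs_leaf_free_space(leaf) >= needed;
}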
2969 * min slot controls the lowest index we're willing to push to the
2970 * right. We'll push up to and including min_slot, but no lower
2972 static noinline int __push_leaf_right(struct btrfs_path *path,
2973 int data_size, int empty,
2974 struct extent_buffer *right,
2975 int free_space, u32 left_nritems,
2978 struct btrfs_fs_info *fs_info = right->fs_info;
2979 struct extent_buffer *left = path->nodes[0];
2980 struct extent_buffer *upper = path->nodes[1];
2981 struct btrfs_map_token token;
2982 struct btrfs_disk_key disk_key;
2995 nr = max_t(u32, 1, min_slot);
2997 if (path->slots[0] >= left_nritems)
2998 push_space += data_size;
3000 slot = path->slots[1];
3001 i = left_nritems - 1;
3003 if (!empty && push_items > 0) {
3004 if (path->slots[0] > i)
3006 if (path->slots[0] == i) {
3007 int space = btrfs_leaf_free_space(left);
3009 if (space + push_space * 2 > free_space)
3014 if (path->slots[0] == i)
3015 push_space += data_size;
3017 this_item_size = btrfs_item_size(left, i);
3018 if (this_item_size + sizeof(struct btrfs_item) +
3019 push_space > free_space)
3023 push_space += this_item_size + sizeof(struct btrfs_item);
3029 if (push_items == 0)
3032 WARN_ON(!empty && push_items == left_nritems);
3034 /* push left to right */
3035 right_nritems = btrfs_header_nritems(right);
3037 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3038 push_space -= leaf_data_end(left);
3040 /* make room in the right data area */
3041 data_end = leaf_data_end(right);
3042 memmove_extent_buffer(right,
3043 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3044 BTRFS_LEAF_DATA_OFFSET + data_end,
3045 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3047 /* copy from the left data area */
3048 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3049 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3050 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
3053 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3054 btrfs_item_nr_offset(0),
3055 right_nritems * sizeof(struct btrfs_item));
3057 /* copy the items from left to right */
3058 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3059 btrfs_item_nr_offset(left_nritems - push_items),
3060 push_items * sizeof(struct btrfs_item));
3062 /* update the item pointers */
3063 btrfs_init_map_token(&token, right);
3064 right_nritems += push_items;
3065 btrfs_set_header_nritems(right, right_nritems);
3066 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3067 for (i = 0; i < right_nritems; i++) {
3068 push_space -= btrfs_token_item_size(&token, i);
3069 btrfs_set_token_item_offset(&token, i, push_space);
3072 left_nritems -= push_items;
3073 btrfs_set_header_nritems(left, left_nritems);
3076 btrfs_mark_buffer_dirty(left);
3078 btrfs_clean_tree_block(left);
3080 btrfs_mark_buffer_dirty(right);
3082 btrfs_item_key(right, &disk_key, 0);
3083 btrfs_set_node_key(upper, &disk_key, slot + 1);
3084 btrfs_mark_buffer_dirty(upper);
3086 /* then fixup the leaf pointer in the path */
3087 if (path->slots[0] >= left_nritems) {
3088 path->slots[0] -= left_nritems;
3089 if (btrfs_header_nritems(path->nodes[0]) == 0)
3090 btrfs_clean_tree_block(path->nodes[0]);
3091 btrfs_tree_unlock(path->nodes[0]);
3092 free_extent_buffer(path->nodes[0]);
3093 path->nodes[0] = right;
3094 path->slots[1] += 1;
3096 btrfs_tree_unlock(right);
3097 free_extent_buffer(right);
3102 btrfs_tree_unlock(right);
3103 free_extent_buffer(right);
3108 * push some data in the path leaf to the right, trying to free up at
3109 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3111 * returns 1 if the push failed because the other node didn't have enough
3112 * room, 0 if everything worked out and < 0 if there were major errors.
3114 * this will push starting from min_slot to the end of the leaf. It won't
3115 * push any slot lower than min_slot
3117 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3118 *root, struct btrfs_path *path,
3119 int min_data_size, int data_size,
3120 int empty, u32 min_slot)
3122 struct extent_buffer *left = path->nodes[0];
3123 struct extent_buffer *right;
3124 struct extent_buffer *upper;
3130 if (!path->nodes[1])
3133 slot = path->slots[1];
3134 upper = path->nodes[1];
3135 if (slot >= btrfs_header_nritems(upper) - 1)
3138 btrfs_assert_tree_write_locked(path->nodes[1]);
3140 right = btrfs_read_node_slot(upper, slot + 1);
3142 * slot + 1 is not valid or we fail to read the right node,
3143 * no big deal, just return.
3148 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
3150 free_space = btrfs_leaf_free_space(right);
3151 if (free_space < data_size)
3154 ret = btrfs_cow_block(trans, root, right, upper,
3155 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3159 left_nritems = btrfs_header_nritems(left);
3160 if (left_nritems == 0)
3163 if (check_sibling_keys(left, right)) {
3165 btrfs_abort_transaction(trans, ret);
3166 btrfs_tree_unlock(right);
3167 free_extent_buffer(right);
3170 if (path->slots[0] == left_nritems && !empty) {
3171 /* Key greater than all keys in the leaf, right neighbor has
3172 * enough room for it and we're not emptying our leaf to delete
3173 * it, therefore use right neighbor to insert the new item and
3174 * no need to touch/dirty our left leaf. */
3175 btrfs_tree_unlock(left);
3176 free_extent_buffer(left);
3177 path->nodes[0] = right;
3183 return __push_leaf_right(path, min_data_size, empty,
3184 right, free_space, left_nritems, min_slot);
3186 btrfs_tree_unlock(right);
3187 free_extent_buffer(right);
3192 * push some data in the path leaf to the left, trying to free up at
3193 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3195 * max_slot can put a limit on how far into the leaf we'll push items. The
3196 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3199 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3200 int empty, struct extent_buffer *left,
3201 int free_space, u32 right_nritems,
3204 struct btrfs_fs_info *fs_info = left->fs_info;
3205 struct btrfs_disk_key disk_key;
3206 struct extent_buffer *right = path->nodes[0];
3210 u32 old_left_nritems;
3214 u32 old_left_item_size;
3215 struct btrfs_map_token token;
3218 nr = min(right_nritems, max_slot);
3220 nr = min(right_nritems - 1, max_slot);
3222 for (i = 0; i < nr; i++) {
3223 if (!empty && push_items > 0) {
3224 if (path->slots[0] < i)
3226 if (path->slots[0] == i) {
3227 int space = btrfs_leaf_free_space(right);
3229 if (space + push_space * 2 > free_space)
3234 if (path->slots[0] == i)
3235 push_space += data_size;
3237 this_item_size = btrfs_item_size(right, i);
3238 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3243 push_space += this_item_size + sizeof(struct btrfs_item);
3246 if (push_items == 0) {
3250 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3252 /* push data from right to left */
3253 copy_extent_buffer(left, right,
3254 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3255 btrfs_item_nr_offset(0),
3256 push_items * sizeof(struct btrfs_item));
3258 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3259 btrfs_item_offset(right, push_items - 1);
3261 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3262 leaf_data_end(left) - push_space,
3263 BTRFS_LEAF_DATA_OFFSET +
3264 btrfs_item_offset(right, push_items - 1),
3266 old_left_nritems = btrfs_header_nritems(left);
3267 BUG_ON(old_left_nritems <= 0);
3269 btrfs_init_map_token(&token, left);
3270 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3271 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3274 ioff = btrfs_token_item_offset(&token, i);
3275 btrfs_set_token_item_offset(&token, i,
3276 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3278 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3280 /* fixup right node */
3281 if (push_items > right_nritems)
3282 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3285 if (push_items < right_nritems) {
3286 push_space = btrfs_item_offset(right, push_items - 1) -
3287 leaf_data_end(right);
3288 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3289 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3290 BTRFS_LEAF_DATA_OFFSET +
3291 leaf_data_end(right), push_space);
3293 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3294 btrfs_item_nr_offset(push_items),
3295 (btrfs_header_nritems(right) - push_items) *
3296 sizeof(struct btrfs_item));
3299 btrfs_init_map_token(&token, right);
3300 right_nritems -= push_items;
3301 btrfs_set_header_nritems(right, right_nritems);
3302 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3303 for (i = 0; i < right_nritems; i++) {
3304 push_space = push_space - btrfs_token_item_size(&token, i);
3305 btrfs_set_token_item_offset(&token, i, push_space);
3308 btrfs_mark_buffer_dirty(left);
3310 btrfs_mark_buffer_dirty(right);
3312 btrfs_clean_tree_block(right);
3314 btrfs_item_key(right, &disk_key, 0);
3315 fixup_low_keys(path, &disk_key, 1);
3317 /* then fixup the leaf pointer in the path */
3318 if (path->slots[0] < push_items) {
3319 path->slots[0] += old_left_nritems;
3320 btrfs_tree_unlock(path->nodes[0]);
3321 free_extent_buffer(path->nodes[0]);
3322 path->nodes[0] = left;
3323 path->slots[1] -= 1;
3325 btrfs_tree_unlock(left);
3326 free_extent_buffer(left);
3327 path->slots[0] -= push_items;
3329 BUG_ON(path->slots[0] < 0);
3332 btrfs_tree_unlock(left);
3333 free_extent_buffer(left);
3338 * push some data in the path leaf to the left, trying to free up at
3339 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3341 * max_slot can put a limit on how far into the leaf we'll push items. The
3342 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3345 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3346 *root, struct btrfs_path *path, int min_data_size,
3347 int data_size, int empty, u32 max_slot)
3349 struct extent_buffer *right = path->nodes[0];
3350 struct extent_buffer *left;
3356 slot = path->slots[1];
3359 if (!path->nodes[1])
3362 right_nritems = btrfs_header_nritems(right);
3363 if (right_nritems == 0)
3366 btrfs_assert_tree_write_locked(path->nodes[1]);
3368 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3370 * slot - 1 is not valid or we fail to read the left node,
3371 * no big deal, just return.
3376 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3378 free_space = btrfs_leaf_free_space(left);
3379 if (free_space < data_size) {
3384 ret = btrfs_cow_block(trans, root, left,
3385 path->nodes[1], slot - 1, &left,
3386 BTRFS_NESTING_LEFT_COW);
3388 /* we hit -ENOSPC, but it isn't fatal here */
3394 if (check_sibling_keys(left, right)) {
3396 btrfs_abort_transaction(trans, ret);
3399 return __push_leaf_left(path, min_data_size,
3400 empty, left, free_space, right_nritems,
3403 btrfs_tree_unlock(left);
3404 free_extent_buffer(left);
3409 * split the path's leaf in two, making sure there is at least data_size
3410 * available for the resulting leaf level of the path.
3412 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3413 struct btrfs_path *path,
3414 struct extent_buffer *l,
3415 struct extent_buffer *right,
3416 int slot, int mid, int nritems)
3418 struct btrfs_fs_info *fs_info = trans->fs_info;
3422 struct btrfs_disk_key disk_key;
3423 struct btrfs_map_token token;
3425 nritems = nritems - mid;
3426 btrfs_set_header_nritems(right, nritems);
3427 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3429 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3430 btrfs_item_nr_offset(mid),
3431 nritems * sizeof(struct btrfs_item));
3433 copy_extent_buffer(right, l,
3434 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3435 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3436 leaf_data_end(l), data_copy_size);
3438 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3440 btrfs_init_map_token(&token, right);
3441 for (i = 0; i < nritems; i++) {
3444 ioff = btrfs_token_item_offset(&token, i);
3445 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3448 btrfs_set_header_nritems(l, mid);
3449 btrfs_item_key(right, &disk_key, 0);
3450 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3452 btrfs_mark_buffer_dirty(right);
3453 btrfs_mark_buffer_dirty(l);
3454 BUG_ON(path->slots[0] != slot);
3457 btrfs_tree_unlock(path->nodes[0]);
3458 free_extent_buffer(path->nodes[0]);
3459 path->nodes[0] = right;
3460 path->slots[0] -= mid;
3461 path->slots[1] += 1;
3463 btrfs_tree_unlock(right);
3464 free_extent_buffer(right);
3467 BUG_ON(path->slots[0] < 0);
3471 * double splits happen when we need to insert a big item in the middle
3472 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3473 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3476 * We avoid this by trying to push the items on either side of our target
3477 * into the adjacent leaves. If all goes well we can avoid the double split completely.
3480 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3481 struct btrfs_root *root,
3482 struct btrfs_path *path,
3489 int space_needed = data_size;
3491 slot = path->slots[0];
3492 if (slot < btrfs_header_nritems(path->nodes[0]))
3493 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3496 * try to push all the items after our slot into the right leaf
3499 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3506 nritems = btrfs_header_nritems(path->nodes[0]);
3508 * our goal is to get our slot at the start or end of a leaf. If
3509 * we've done so we're done
3511 if (path->slots[0] == 0 || path->slots[0] == nritems)
3514 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3517 /* try to push all the items before our slot into the previous leaf */
3518 slot = path->slots[0];
3519 space_needed = data_size;
3521 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3522 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3535 * split the path's leaf in two, making sure there is at least data_size
3536 * available for the resulting leaf level of the path.
3538 * returns 0 if all went well and < 0 on failure.
3540 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3541 struct btrfs_root *root,
3542 const struct btrfs_key *ins_key,
3543 struct btrfs_path *path, int data_size,
3546 struct btrfs_disk_key disk_key;
3547 struct extent_buffer *l;
3551 struct extent_buffer *right;
3552 struct btrfs_fs_info *fs_info = root->fs_info;
3556 int num_doubles = 0;
3557 int tried_avoid_double = 0;
3560 slot = path->slots[0];
3561 if (extend && data_size + btrfs_item_size(l, slot) +
3562 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3565 /* first try to make some room by pushing left and right */
3566 if (data_size && path->nodes[1]) {
3567 int space_needed = data_size;
3569 if (slot < btrfs_header_nritems(l))
3570 space_needed -= btrfs_leaf_free_space(l);
3572 wret = push_leaf_right(trans, root, path, space_needed,
3573 space_needed, 0, 0);
3577 space_needed = data_size;
3579 space_needed -= btrfs_leaf_free_space(l);
3580 wret = push_leaf_left(trans, root, path, space_needed,
3581 space_needed, 0, (u32)-1);
3587 /* did the pushes work? */
3588 if (btrfs_leaf_free_space(l) >= data_size)
3592 if (!path->nodes[1]) {
3593 ret = insert_new_root(trans, root, path, 1);
3600 slot = path->slots[0];
3601 nritems = btrfs_header_nritems(l);
3602 mid = (nritems + 1) / 2;
3606 leaf_space_used(l, mid, nritems - mid) + data_size >
3607 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3608 if (slot >= nritems) {
3612 if (mid != nritems &&
3613 leaf_space_used(l, mid, nritems - mid) +
3614 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3615 if (data_size && !tried_avoid_double)
3616 goto push_for_double;
3622 if (leaf_space_used(l, 0, mid) + data_size >
3623 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3624 if (!extend && data_size && slot == 0) {
3626 } else if ((extend || !data_size) && slot == 0) {
3630 if (mid != nritems &&
3631 leaf_space_used(l, mid, nritems - mid) +
3632 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3633 if (data_size && !tried_avoid_double)
3634 goto push_for_double;
3642 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3644 btrfs_item_key(l, &disk_key, mid);
3647 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3648 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3649 * subclasses, which is 8 at the time of this patch, and we've maxed it
3650 * out. In the future we could add a
3651 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3652 * use BTRFS_NESTING_NEW_ROOT.
3654 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3655 &disk_key, 0, l->start, 0,
3656 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3657 BTRFS_NESTING_SPLIT);
3659 return PTR_ERR(right);
3661 root_add_used(root, fs_info->nodesize);
3665 btrfs_set_header_nritems(right, 0);
3666 insert_ptr(trans, path, &disk_key,
3667 right->start, path->slots[1] + 1, 1);
3668 btrfs_tree_unlock(path->nodes[0]);
3669 free_extent_buffer(path->nodes[0]);
3670 path->nodes[0] = right;
3672 path->slots[1] += 1;
3674 btrfs_set_header_nritems(right, 0);
3675 insert_ptr(trans, path, &disk_key,
3676 right->start, path->slots[1], 1);
3677 btrfs_tree_unlock(path->nodes[0]);
3678 free_extent_buffer(path->nodes[0]);
3679 path->nodes[0] = right;
3681 if (path->slots[1] == 0)
3682 fixup_low_keys(path, &disk_key, 1);
3685 * We create a new leaf 'right' for the required ins_len and
3686 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3687 * ins_len bytes of content into 'right'.
3692 copy_for_split(trans, path, l, right, slot, mid, nritems);
3695 BUG_ON(num_doubles != 0);
3703 push_for_double_split(trans, root, path, data_size);
3704 tried_avoid_double = 1;
3705 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3710 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3711 struct btrfs_root *root,
3712 struct btrfs_path *path, int ins_len)
3714 struct btrfs_key key;
3715 struct extent_buffer *leaf;
3716 struct btrfs_file_extent_item *fi;
3721 leaf = path->nodes[0];
3722 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3724 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3725 key.type != BTRFS_EXTENT_CSUM_KEY);
3727 if (btrfs_leaf_free_space(leaf) >= ins_len)
3730 item_size = btrfs_item_size(leaf, path->slots[0]);
3731 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3732 fi = btrfs_item_ptr(leaf, path->slots[0],
3733 struct btrfs_file_extent_item);
3734 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3736 btrfs_release_path(path);
3738 path->keep_locks = 1;
3739 path->search_for_split = 1;
3740 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3741 path->search_for_split = 0;
3748 leaf = path->nodes[0];
3749 /* if our item isn't there, return now */
3750 if (item_size != btrfs_item_size(leaf, path->slots[0]))
3753 /* the leaf has changed, it now has room. return now */
3754 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3757 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3758 fi = btrfs_item_ptr(leaf, path->slots[0],
3759 struct btrfs_file_extent_item);
3760 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3764 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3768 path->keep_locks = 0;
3769 btrfs_unlock_up_safe(path, 1);
3772 path->keep_locks = 0;
3776 static noinline int split_item(struct btrfs_path *path,
3777 const struct btrfs_key *new_key,
3778 unsigned long split_offset)
3780 struct extent_buffer *leaf;
3781 int orig_slot, slot;
3786 struct btrfs_disk_key disk_key;
3788 leaf = path->nodes[0];
3789 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3791 orig_slot = path->slots[0];
3792 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3793 item_size = btrfs_item_size(leaf, path->slots[0]);
3795 buf = kmalloc(item_size, GFP_NOFS);
3799 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3800 path->slots[0]), item_size);
3802 slot = path->slots[0] + 1;
3803 nritems = btrfs_header_nritems(leaf);
3804 if (slot != nritems) {
3805 /* shift the items */
3806 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3807 btrfs_item_nr_offset(slot),
3808 (nritems - slot) * sizeof(struct btrfs_item));
3811 btrfs_cpu_key_to_disk(&disk_key, new_key);
3812 btrfs_set_item_key(leaf, &disk_key, slot);
3814 btrfs_set_item_offset(leaf, slot, orig_offset);
3815 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3817 btrfs_set_item_offset(leaf, orig_slot,
3818 orig_offset + item_size - split_offset);
3819 btrfs_set_item_size(leaf, orig_slot, split_offset);
3821 btrfs_set_header_nritems(leaf, nritems + 1);
3823 /* write the data for the start of the original item */
3824 write_extent_buffer(leaf, buf,
3825 btrfs_item_ptr_offset(leaf, path->slots[0]),
3828 /* write the data for the new item */
3829 write_extent_buffer(leaf, buf + split_offset,
3830 btrfs_item_ptr_offset(leaf, slot),
3831 item_size - split_offset);
3832 btrfs_mark_buffer_dirty(leaf);
3834 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3840 * This function splits a single item into two items,
3841 * giving 'new_key' to the new item and splitting the
3842 * old one at split_offset (from the start of the item).
3844 * The path may be released by this operation. After
3845 * the split, the path is pointing to the old item. The
3846 * new item is going to be in the same node as the old one.
3848 * Note, the item being split must be small enough to live alone on
3849 * a tree block with room for one extra struct btrfs_item
3851 * This allows us to split the item in place, keeping a lock on the
3852 * leaf the entire time.
3854 int btrfs_split_item(struct btrfs_trans_handle *trans,
3855 struct btrfs_root *root,
3856 struct btrfs_path *path,
3857 const struct btrfs_key *new_key,
3858 unsigned long split_offset)
3861 ret = setup_leaf_for_split(trans, root, path,
3862 sizeof(struct btrfs_item));
3866 ret = split_item(path, new_key, split_offset);
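/*
 * Illustrative sketch (hypothetical, not in the original file): split the
 * item at path->slots[0] after its first 40 bytes. Bytes [0, 40) stay with
 * the original key, bytes [40, item_size) move into a new item carrying
 * @new_key; both end up in the same leaf, as the comment above describes.
 */
static int __maybe_unused example_split_after_40(struct btrfs_trans_handle *trans,
                                                 struct btrfs_root *root,
                                                 struct btrfs_path *path,
                                                 const struct btrfs_key *new_key)
{
        return btrfs_split_item(trans, root, path, new_key, 40);
}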
3871 * make the item pointed to by the path smaller. new_size indicates
3872 * how small to make it, and from_end tells us if we just chop bytes
3873 * off the end of the item or if we shift the item to chop bytes off the front.
3876 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3879 struct extent_buffer *leaf;
3881 unsigned int data_end;
3882 unsigned int old_data_start;
3883 unsigned int old_size;
3884 unsigned int size_diff;
3886 struct btrfs_map_token token;
3888 leaf = path->nodes[0];
3889 slot = path->slots[0];
3891 old_size = btrfs_item_size(leaf, slot);
3892 if (old_size == new_size)
3895 nritems = btrfs_header_nritems(leaf);
3896 data_end = leaf_data_end(leaf);
3898 old_data_start = btrfs_item_offset(leaf, slot);
3900 size_diff = old_size - new_size;
3903 BUG_ON(slot >= nritems);
3906 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3908 /* first correct the data pointers */
3909 btrfs_init_map_token(&token, leaf);
3910 for (i = slot; i < nritems; i++) {
3913 ioff = btrfs_token_item_offset(&token, i);
3914 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
3917 /* shift the data */
3919 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3920 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3921 data_end, old_data_start + new_size - data_end);
3923 struct btrfs_disk_key disk_key;
3926 btrfs_item_key(leaf, &disk_key, slot);
3928 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3930 struct btrfs_file_extent_item *fi;
3932 fi = btrfs_item_ptr(leaf, slot,
3933 struct btrfs_file_extent_item);
3934 fi = (struct btrfs_file_extent_item *)(
3935 (unsigned long)fi - size_diff);
3937 if (btrfs_file_extent_type(leaf, fi) ==
3938 BTRFS_FILE_EXTENT_INLINE) {
3939 ptr = btrfs_item_ptr_offset(leaf, slot);
3940 memmove_extent_buffer(leaf, ptr,
3942 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3946 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3947 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3948 data_end, old_data_start - data_end);
3950 offset = btrfs_disk_key_offset(&disk_key);
3951 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3952 btrfs_set_item_key(leaf, &disk_key, slot);
3954 fixup_low_keys(path, &disk_key, 1);
3957 btrfs_set_item_size(leaf, slot, new_size);
3958 btrfs_mark_buffer_dirty(leaf);
3960 if (btrfs_leaf_free_space(leaf) < 0) {
3961 btrfs_print_leaf(leaf);
3967 * make the item pointed to by the path bigger, data_size is the added size.
3969 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3972 struct extent_buffer *leaf;
3974 unsigned int data_end;
3975 unsigned int old_data;
3976 unsigned int old_size;
3978 struct btrfs_map_token token;
3980 leaf = path->nodes[0];
3982 nritems = btrfs_header_nritems(leaf);
3983 data_end = leaf_data_end(leaf);
3985 if (btrfs_leaf_free_space(leaf) < data_size) {
3986 btrfs_print_leaf(leaf);
3989 slot = path->slots[0];
3990 old_data = btrfs_item_data_end(leaf, slot);
3993 if (slot >= nritems) {
3994 btrfs_print_leaf(leaf);
3995 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4001 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4003 /* first correct the data pointers */
4004 btrfs_init_map_token(&token, leaf);
4005 for (i = slot; i < nritems; i++) {
4008 ioff = btrfs_token_item_offset(&token, i);
4009 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4012 /* shift the data */
4013 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4014 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4015 data_end, old_data - data_end);
4017 data_end = old_data;
4018 old_size = btrfs_item_size(leaf, slot);
4019 btrfs_set_item_size(leaf, slot, old_size + data_size);
4020 btrfs_mark_buffer_dirty(leaf);
4022 if (btrfs_leaf_free_space(leaf) < 0) {
4023 btrfs_print_leaf(leaf);
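/*
 * Illustrative sketch (hypothetical, not in the original file): grow the item
 * under the path by @extra bytes and then shrink it back from the end,
 * showing that btrfs_extend_item() and btrfs_truncate_item() are symmetric.
 * Assumes the leaf has at least @extra bytes of free space.
 */
static void __maybe_unused example_resize_item(struct btrfs_path *path, u32 extra)
{
        struct extent_buffer *leaf = path->nodes[0];
        const u32 old_size = btrfs_item_size(leaf, path->slots[0]);

        btrfs_extend_item(path, extra);
        btrfs_truncate_item(path, old_size, 1);
}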
4029 * setup_items_for_insert - Helper called before inserting one or more items
4030 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
4031 * in a function that doesn't call btrfs_search_slot
4033 * @root: root we are inserting items to
4034 * @path: points to the leaf/slot where we are going to insert new items
4035 * @batch: information about the batch of items to insert
4037 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4038 const struct btrfs_item_batch *batch)
4040 struct btrfs_fs_info *fs_info = root->fs_info;
4043 unsigned int data_end;
4044 struct btrfs_disk_key disk_key;
4045 struct extent_buffer *leaf;
4047 struct btrfs_map_token token;
4051 * Before anything else, update keys in the parent and other ancestors
4052 * if needed, then release the write locks on them, so that other tasks
4053 * can use them while we modify the leaf.
4055 if (path->slots[0] == 0) {
4056 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4057 fixup_low_keys(path, &disk_key, 1);
4059 btrfs_unlock_up_safe(path, 1);
4061 leaf = path->nodes[0];
4062 slot = path->slots[0];
4064 nritems = btrfs_header_nritems(leaf);
4065 data_end = leaf_data_end(leaf);
4066 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4068 if (btrfs_leaf_free_space(leaf) < total_size) {
4069 btrfs_print_leaf(leaf);
4070 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4071 total_size, btrfs_leaf_free_space(leaf));
4075 btrfs_init_map_token(&token, leaf);
4076 if (slot != nritems) {
4077 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4079 if (old_data < data_end) {
4080 btrfs_print_leaf(leaf);
4082 "item at slot %d with data offset %u beyond data end of leaf %u",
4083 slot, old_data, data_end);
4087 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4089 /* first correct the data pointers */
4090 for (i = slot; i < nritems; i++) {
4093 ioff = btrfs_token_item_offset(&token, i);
4094 btrfs_set_token_item_offset(&token, i,
4095 ioff - batch->total_data_size);
4097 /* shift the items */
4098 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + batch->nr),
4099 btrfs_item_nr_offset(slot),
4100 (nritems - slot) * sizeof(struct btrfs_item));
4102 /* shift the data */
4103 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4104 data_end - batch->total_data_size,
4105 BTRFS_LEAF_DATA_OFFSET + data_end,
4106 old_data - data_end);
4107 data_end = old_data;
4110 /* setup the items for the new data */
4111 for (i = 0; i < batch->nr; i++) {
4112 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4113 btrfs_set_item_key(leaf, &disk_key, slot + i);
4114 data_end -= batch->data_sizes[i];
4115 btrfs_set_token_item_offset(&token, slot + i, data_end);
4116 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4119 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4120 btrfs_mark_buffer_dirty(leaf);
4122 if (btrfs_leaf_free_space(leaf) < 0) {
4123 btrfs_print_leaf(leaf);
4129 * Insert a new item into a leaf.
4131 * @root: The root of the btree.
4132 * @path: A path pointing to the target leaf and slot.
4133 * @key: The key of the new item.
4134 * @data_size: The size of the data associated with the new key.
4136 void btrfs_setup_item_for_insert(struct btrfs_root *root,
4137 struct btrfs_path *path,
4138 const struct btrfs_key *key,
4141 struct btrfs_item_batch batch;
4144 batch.data_sizes = &data_size;
4145 batch.total_data_size = data_size;
4148 setup_items_for_insert(root, path, &batch);
4152 * Given a key and some data, insert items into the tree.
4153 * This does all the path init required, making room in the tree if needed.
4155 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4156 struct btrfs_root *root,
4157 struct btrfs_path *path,
4158 const struct btrfs_item_batch *batch)
4164 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4165 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4171 slot = path->slots[0];
4174 setup_items_for_insert(root, path, batch);
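/*
 * Illustrative sketch (hypothetical keys and sizes, not in the original
 * file): insert two empty items in a single batch. total_data_size must be
 * the sum of data_sizes; the caller still has to fill the item payloads and
 * release the path afterwards.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
                                                   struct btrfs_root *root,
                                                   struct btrfs_path *path)
{
        const struct btrfs_key keys[2] = {
                { .objectid = 256, .type = BTRFS_XATTR_ITEM_KEY, .offset = 0 },
                { .objectid = 256, .type = BTRFS_XATTR_ITEM_KEY, .offset = 1 },
        };
        const u32 sizes[2] = { 16, 32 };
        const struct btrfs_item_batch batch = {
                .keys = keys,
                .data_sizes = sizes,
                .total_data_size = 48,
                .nr = 2,
        };

        return btrfs_insert_empty_items(trans, root, path, &batch);
}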
4179 * Given a key and some data, insert an item into the tree.
4180 * This does all the path init required, making room in the tree if needed.
4182 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4183 const struct btrfs_key *cpu_key, void *data,
4187 struct btrfs_path *path;
4188 struct extent_buffer *leaf;
4191 path = btrfs_alloc_path();
4194 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4196 leaf = path->nodes[0];
4197 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4198 write_extent_buffer(leaf, data, ptr, data_size);
4199 btrfs_mark_buffer_dirty(leaf);
4201 btrfs_free_path(path);
4206 * This function duplicates an item, giving 'new_key' to the new item.
4207 * It guarantees both items live in the same tree leaf and the new item is
4208 * contiguous with the original item.
4210 * This allows us to split a file extent in place, keeping a lock on the leaf the entire time.
4213 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4214 struct btrfs_root *root,
4215 struct btrfs_path *path,
4216 const struct btrfs_key *new_key)
4218 struct extent_buffer *leaf;
4222 leaf = path->nodes[0];
4223 item_size = btrfs_item_size(leaf, path->slots[0]);
4224 ret = setup_leaf_for_split(trans, root, path,
4225 item_size + sizeof(struct btrfs_item));
4230 btrfs_setup_item_for_insert(root, path, new_key, item_size);
4231 leaf = path->nodes[0];
4232 memcpy_extent_buffer(leaf,
4233 btrfs_item_ptr_offset(leaf, path->slots[0]),
4234 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4240 * delete the pointer from a given node.
4242 * the tree should have been previously balanced so the deletion does not empty a node.
4245 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4246 int level, int slot)
4248 struct extent_buffer *parent = path->nodes[level];
4252 nritems = btrfs_header_nritems(parent);
4253 if (slot != nritems - 1) {
4255 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4256 slot + 1, nritems - slot - 1);
4259 memmove_extent_buffer(parent,
4260 btrfs_node_key_ptr_offset(slot),
4261 btrfs_node_key_ptr_offset(slot + 1),
4262 sizeof(struct btrfs_key_ptr) *
4263 (nritems - slot - 1));
4265 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4266 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
4271 btrfs_set_header_nritems(parent, nritems);
4272 if (nritems == 0 && parent == root->node) {
4273 BUG_ON(btrfs_header_level(root->node) != 1);
4274 /* just turn the root into a leaf and break */
4275 btrfs_set_header_level(root->node, 0);
4276 } else if (slot == 0) {
4277 struct btrfs_disk_key disk_key;
4279 btrfs_node_key(parent, &disk_key, 0);
4280 fixup_low_keys(path, &disk_key, level + 1);
4282 btrfs_mark_buffer_dirty(parent);
4286 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4289 * This deletes the pointer in path->nodes[1] and frees the leaf
4290 * block extent.
4292 * The path must have already been setup for deleting the leaf, including
4293 * all the proper balancing. path->nodes[1] must be locked.
4295 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4296 struct btrfs_root *root,
4297 struct btrfs_path *path,
4298 struct extent_buffer *leaf)
4300 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4301 del_ptr(root, path, 1, path->slots[1]);
4304 * btrfs_free_extent is expensive, we want to make sure we
4305 * aren't holding any locks when we call it
4307 btrfs_unlock_up_safe(path, 0);
4309 root_sub_used(root, leaf->len);
4311 atomic_inc(&leaf->refs);
4312 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4313 free_extent_buffer_stale(leaf);
4316 * delete 'nr' items starting at 'slot' in the leaf the path points to. If
4317 * that empties the leaf, remove it from the tree
4319 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4320 struct btrfs_path *path, int slot, int nr)
4322 struct btrfs_fs_info *fs_info = root->fs_info;
4323 struct extent_buffer *leaf;
4328 leaf = path->nodes[0];
4329 nritems = btrfs_header_nritems(leaf);
4331 if (slot + nr != nritems) {
4332 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4333 const int data_end = leaf_data_end(leaf);
4334 struct btrfs_map_token token;
4338 for (i = 0; i < nr; i++)
4339 dsize += btrfs_item_size(leaf, slot + i);
4341 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4343 BTRFS_LEAF_DATA_OFFSET + data_end,
4344 last_off - data_end);
4346 btrfs_init_map_token(&token, leaf);
4347 for (i = slot + nr; i < nritems; i++) {
4350 ioff = btrfs_token_item_offset(&token, i);
4351 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4354 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4355 btrfs_item_nr_offset(slot + nr),
4356 sizeof(struct btrfs_item) *
4357 (nritems - slot - nr));
4359 btrfs_set_header_nritems(leaf, nritems - nr);
4362 /* delete the leaf if we've emptied it */
4364 if (leaf == root->node) {
4365 btrfs_set_header_level(leaf, 0);
4367 btrfs_clean_tree_block(leaf);
4368 btrfs_del_leaf(trans, root, path, leaf);
4371 int used = leaf_space_used(leaf, 0, nritems);
4373 struct btrfs_disk_key disk_key;
4375 btrfs_item_key(leaf, &disk_key, 0);
4376 fixup_low_keys(path, &disk_key, 1);
4380 * Try to delete the leaf if it is mostly empty. We do this by
4381 * trying to move all its items into its left and right neighbours.
4382 * If we can't move all the items, then we don't delete it - it's
4383 * not ideal, but future insertions might fill the leaf with more
4384 * items, or items from other leaves might be moved later into our
4385 * leaf due to deletions on those leaves.
4387 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4390 /* push_leaf_left fixes the path.
4391 * make sure the path still points to our leaf
4392 * for a possible call to del_ptr below
4394 slot = path->slots[1];
4395 atomic_inc(&leaf->refs);
4397 * We want to be able to at least push one item to the
4398 * left neighbour leaf, and that's the first item.
4400 min_push_space = sizeof(struct btrfs_item) +
4401 btrfs_item_size(leaf, 0);
4402 wret = push_leaf_left(trans, root, path, 0,
4403 min_push_space, 1, (u32)-1);
4404 if (wret < 0 && wret != -ENOSPC)
4407 if (path->nodes[0] == leaf &&
4408 btrfs_header_nritems(leaf)) {
4410 * If we were not able to push all items from our
4411 * leaf to its left neighbour, then attempt to
4412 * either push all the remaining items to the
4413 * right neighbour or none. There's no advantage
4414 * in pushing only some items, instead of all, as
4415 * it's pointless to end up with a leaf having
4416 * too few items while the neighbours can be full
4419 nritems = btrfs_header_nritems(leaf);
4420 min_push_space = leaf_space_used(leaf, 0, nritems);
4421 wret = push_leaf_right(trans, root, path, 0,
4422 min_push_space, 1, 0);
4423 if (wret < 0 && wret != -ENOSPC)
4427 if (btrfs_header_nritems(leaf) == 0) {
4428 path->slots[1] = slot;
4429 btrfs_del_leaf(trans, root, path, leaf);
4430 free_extent_buffer(leaf);
4433 /* if we're still in the path, make sure
4434 * we're dirty. Otherwise, one of the
4435 * push_leaf functions must have already
4436 * dirtied this buffer
4438 if (path->nodes[0] == leaf)
4439 btrfs_mark_buffer_dirty(leaf);
4440 free_extent_buffer(leaf);
4443 btrfs_mark_buffer_dirty(leaf);
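/*
 * Illustrative sketch (hypothetical, not in the original file): delete just
 * the one item the path currently points at, the same pattern as the
 * btrfs_del_item() wrapper in ctree.h.
 */
static int __maybe_unused example_del_current_item(struct btrfs_trans_handle *trans,
                                                   struct btrfs_root *root,
                                                   struct btrfs_path *path)
{
        return btrfs_del_items(trans, root, path, path->slots[0], 1);
}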
4450 * search the tree again to find a leaf with lesser keys
4451 * returns 0 if it found something or 1 if there are no lesser leaves.
4452 * returns < 0 on io errors.
4454 * This may release the path, and so you may lose any locks held at the time you call it.
4457 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4459 struct btrfs_key key;
4460 struct btrfs_key orig_key;
4461 struct btrfs_disk_key found_key;
4464 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4467 if (key.offset > 0) {
4469 } else if (key.type > 0) {
4471 key.offset = (u64)-1;
4472 } else if (key.objectid > 0) {
4475 key.offset = (u64)-1;
4480 btrfs_release_path(path);
4481 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4486 * Previous key not found. Even if we were at slot 0 of the leaf we had
4487 * before releasing the path and calling btrfs_search_slot(), we now may
4488 * be in a slot pointing to the same original key - this can happen if
4489 * after we released the path, one or more items were moved from a
4490 * sibling leaf into the front of the leaf we had due to an insertion
4491 * (see push_leaf_right()).
4492 * If we hit this case and our slot is > 0, just decrement the slot
4493 * so that the caller does not process the same key again, which may or
4494 * may not break the caller, depending on its logic.
4496 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
4497 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
4498 ret = comp_keys(&found_key, &orig_key);
4500 if (path->slots[0] > 0) {
4505 * At slot 0, same key as before, it means orig_key is
4506 * the lowest, leftmost, key in the tree. We're done.
4512 btrfs_item_key(path->nodes[0], &found_key, 0);
4513 ret = comp_keys(&found_key, &key);
4515 * We might have had an item with the previous key in the tree right
4516 * before we released our path. And after we released our path, that
4517 * item might have been pushed to the first slot (0) of the leaf we
4518 * were holding due to a tree balance. Alternatively, an item with the
4519 * previous key can exist as the only element of a leaf (big fat item).
4520 * Therefore account for these 2 cases, so that our callers (like
4521 * btrfs_previous_item) don't miss an existing item with a key matching
4522 * the previous key we computed above.
4530 * A helper function to walk down the tree starting at min_key, and looking
4531 * for nodes or leaves that have a minimum transaction id.
4532 * This is used by the btree defrag code, and tree logging
4534 * This does not cow, but it does stuff the starting key it finds back
4535 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4536 * key and get a writable path.
4538 * This honors path->lowest_level to prevent descent past a given level of the tree.
4541 * min_trans indicates the oldest transaction that you are interested
4542 * in walking through. Any nodes or leaves older than min_trans are
4543 * skipped over (without reading them).
4545 * returns zero if something useful was found, < 0 on error and 1 if there
4546 * was nothing in the tree that matched the search criteria.
4548 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4549 struct btrfs_path *path,
4552 struct extent_buffer *cur;
4553 struct btrfs_key found_key;
4559 int keep_locks = path->keep_locks;
4561 ASSERT(!path->nowait);
4562 path->keep_locks = 1;
4564 cur = btrfs_read_lock_root_node(root);
4565 level = btrfs_header_level(cur);
4566 WARN_ON(path->nodes[level]);
4567 path->nodes[level] = cur;
4568 path->locks[level] = BTRFS_READ_LOCK;
4570 if (btrfs_header_generation(cur) < min_trans) {
4575 nritems = btrfs_header_nritems(cur);
4576 level = btrfs_header_level(cur);
4577 sret = btrfs_bin_search(cur, min_key, &slot);
4583 /* at the lowest level, we're done, setup the path and exit */
4584 if (level == path->lowest_level) {
4585 if (slot >= nritems)
4588 path->slots[level] = slot;
4589 btrfs_item_key_to_cpu(cur, &found_key, slot);
4592 if (sret && slot > 0)
4595 * check this node pointer against the min_trans parameter.
4596 * If it is too old, skip to the next one.
4598 while (slot < nritems) {
4601 gen = btrfs_node_ptr_generation(cur, slot);
4602 if (gen < min_trans) {
4610 * we didn't find a candidate key in this node, walk forward
4611 * and find another one
4613 if (slot >= nritems) {
4614 path->slots[level] = slot;
4615 sret = btrfs_find_next_key(root, path, min_key, level,
4618 btrfs_release_path(path);
4624 /* save our key for returning back */
4625 btrfs_node_key_to_cpu(cur, &found_key, slot);
4626 path->slots[level] = slot;
4627 if (level == path->lowest_level) {
4631 cur = btrfs_read_node_slot(cur, slot);
4637 btrfs_tree_read_lock(cur);
4639 path->locks[level - 1] = BTRFS_READ_LOCK;
4640 path->nodes[level - 1] = cur;
4641 unlock_up(path, level, 1, 0, NULL);
4644 path->keep_locks = keep_locks;
4646 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4647 memcpy(min_key, &found_key, sizeof(found_key));
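/*
 * Illustrative sketch (hypothetical, not in the original file): find the
 * first key at or after @first_key that lives in a block with generation
 * >= @min_trans, the defrag/tree-log style use of btrfs_search_forward().
 * On success the found key is written back into @first_key.
 */
static int __maybe_unused example_search_newer(struct btrfs_root *root,
                                               struct btrfs_key *first_key,
                                               u64 min_trans)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_forward(root, first_key, path, min_trans);

        btrfs_free_path(path);
        return ret;
}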
4653 * this is similar to btrfs_next_leaf, but does not try to preserve
4654 * and fixup the path. It looks for and returns the next key in the
4655 * tree based on the current path and the min_trans parameter.
4657 * 0 is returned if another key is found, < 0 if there are any errors
4658 * and 1 is returned if there are no higher keys in the tree
4660 * path->keep_locks should be set to 1 on the search made before
4661 * calling this function.
4663 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4664 struct btrfs_key *key, int level, u64 min_trans)
4667 struct extent_buffer *c;
4669 WARN_ON(!path->keep_locks && !path->skip_locking);
4670 while (level < BTRFS_MAX_LEVEL) {
4671 if (!path->nodes[level])
4674 slot = path->slots[level] + 1;
4675 c = path->nodes[level];
4677 if (slot >= btrfs_header_nritems(c)) {
4680 struct btrfs_key cur_key;
4681 if (level + 1 >= BTRFS_MAX_LEVEL ||
4682 !path->nodes[level + 1])
4685 if (path->locks[level + 1] || path->skip_locking) {
4690 slot = btrfs_header_nritems(c) - 1;
4692 btrfs_item_key_to_cpu(c, &cur_key, slot);
4694 btrfs_node_key_to_cpu(c, &cur_key, slot);
4696 orig_lowest = path->lowest_level;
4697 btrfs_release_path(path);
4698 path->lowest_level = level;
4699 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4701 path->lowest_level = orig_lowest;
4705 c = path->nodes[level];
4706 slot = path->slots[level];
4713 btrfs_item_key_to_cpu(c, key, slot);
4715 u64 gen = btrfs_node_ptr_generation(c, slot);
4717 if (gen < min_trans) {
4721 btrfs_node_key_to_cpu(c, key, slot);
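
/*
 * Illustrative sketch, not part of the upstream file: peeking at the key
 * that follows the current path position.  As the comment above requires,
 * keep_locks is set on the search that builds the path.  The function name
 * is an assumption for the example; level 0 and min_trans 0 mean "next leaf
 * key, any generation".
 */
static int __maybe_unused example_peek_next_key(struct btrfs_root *root,
						const struct btrfs_key *start,
						struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret >= 0)
		ret = btrfs_find_next_key(root, path, next_key, 0, 0);

	btrfs_free_path(path);
	return ret;	/* 0: next_key filled, 1: no higher key, < 0: error */
}
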
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * By releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with a bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (especially in __btrfs_drop_extents()).
	 *
	 * A bit more explanation about this check: with ret > 0 the key was
	 * not found, and the path points to the slot where it would be
	 * inserted, so the path->slots[0] item must be the bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[level]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;
		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked.  To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;
		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}
	return ret;
}
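
/*
 * Illustrative sketch, not part of the upstream file: the common forward
 * iteration pattern built on top of btrfs_next_old_leaf().  btrfs_next_leaf()
 * is the time_seq == 0 wrapper declared in ctree.h.  The function name and
 * the elided item processing are assumptions for the example.
 */
static int __maybe_unused example_iterate_items(struct btrfs_root *root,
						const struct btrfs_key *first)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* < 0: error, 1: no more leaves */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* ... process the item @found at path->slots[0] ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
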
/*
 * This uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
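
/*
 * Illustrative sketch, not part of the upstream file: position a path just
 * past a key, then step back to the closest preceding item of a given type.
 * This mirrors how callers locate e.g. a block group item; the function name
 * is an assumption for the example.
 */
static int __maybe_unused example_find_prev_of_type(struct btrfs_root *root,
						    u64 objectid, int type,
						    struct btrfs_key *found)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Position the path at (or just past) the largest matching key. */
	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* Step back to the closest item of @type. */
	ret = btrfs_previous_item(root, path, 0, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;	/* 0: found, 1: nothing found, < 0: error */
}
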
/*
 * Search in the extent tree to find a previous Metadata/Data extent item
 * with a minimum objectid.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}
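
/*
 * Illustrative sketch, not part of the upstream file: roughly the pattern
 * scrub follows with this helper.  After a search lands just past
 * @search_start, step back one extent item to see whether the previous
 * extent begins at or before @search_start.  The function name is an
 * assumption; length handling (METADATA_ITEM keys encode the tree level,
 * not a byte length) is deliberately elided.
 */
static int __maybe_unused example_prev_covering_extent(struct btrfs_root *extent_root,
						       struct btrfs_path *path,
						       u64 search_start)
{
	struct btrfs_key key;
	int ret;

	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret)	/* 1: no previous extent item, < 0: error */
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	/* 0 if the previous extent starts at or before @search_start. */
	return key.objectid <= search_start ? 0 : 1;
}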