/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

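/*
 * A rough lifecycle sketch, for orientation only (the processing side of
 * this machinery lives in extent-tree.c):
 *
 *   btrfs_add_delayed_tree_ref() / btrfs_add_delayed_data_ref()
 *     -> add_delayed_ref_head()        find or create the per-extent head
 *     -> add_delayed_tree/data_ref()   queue the individual ref change
 *
 *   later, while running delayed refs:
 *     btrfs_select_ref_head()          pick an unprocessed head
 *     btrfs_merge_delayed_refs()       cancel opposing add/drop pairs
 *     ...then apply the surviving refs to the extent allocation tree
 */
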
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

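/*
 * Note: within this file both comparators are only used as equality tests
 * (any nonzero result means "different ref"); see merge_ref() and
 * add_delayed_ref_tail_merge() below.
 */
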
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

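/*
 * A non-NULL return from htree_insert() is not an error: callers merge the
 * new head into the existing one instead, e.g. in add_delayed_ref_head():
 *
 *	existing = htree_insert(&delayed_refs->href_root, &head_ref->href_node);
 *	if (existing)
 *		update_existing_head_ref(delayed_refs, &existing->node, ref);
 */
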
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match was found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

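/*
 * Worked example of the merge arithmetic above (illustrative values): an
 * ADD ref with ref_mod 2 meeting a DROP ref with ref_mod 1 yields mod = -1,
 * the DROP is dropped and the ADD survives with ref_mod 1; had both carried
 * ref_mod 1, ref_mod would reach 0 and both refs would be removed.
 */
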
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        read_lock(&fs_info->tree_mod_log_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        read_unlock(&fs_info->tree_mod_log_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        read_lock(&fs_info->tree_mod_log_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq,
                                delayed_refs);
                        ret = 1;
                }
        }

        read_unlock(&fs_info->tree_mod_log_lock);
        return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

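/*
 * Note the two-pass scan above: run_delayed_start makes successive calls
 * walk the rbtree in bytenr order, and when the end is reached the scan
 * wraps back to bytenr 0 exactly once (tracked by @loop) before giving up.
 */
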
/*
 * Helper to insert the ref_node into the tail of the list, or merge it
 * with the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr nor is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}

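/*
 * Callers use the return value to decide who owns the ref's memory, e.g.
 * in add_delayed_tree_ref() below:
 *
 *	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
 *	if (ret > 0)	// merged, the new ref was never linked in
 *		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
 */
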
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation;
         * we only need the lock here because the head could be getting
         * processed currently, while refs we just added are known to be safe.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we go from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

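/*
 * Example of the pending_csums adjustment above (illustrative numbers): a
 * data head with total_ref_mod +1 that absorbs an update with ref_mod -2
 * goes negative, so the extent is now expected to be freed and its
 * num_bytes worth of csum items are counted into pending_csums.
 */
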
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated;
         * this follows the existing (bad) practice for now and needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}