1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
13 #include "delayed-inode.h"
15 #include "transaction.h"
18 #include "inode-item.h"
19 #include "space-info.h"
20 #include "accessors.h"
21 #include "file-item.h"
23 #define BTRFS_DELAYED_WRITEBACK 512
24 #define BTRFS_DELAYED_BACKGROUND 128
25 #define BTRFS_DELAYED_BATCH 16
27 static struct kmem_cache *delayed_node_cache;
29 int __init btrfs_delayed_inode_init(void)
31 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
32 sizeof(struct btrfs_delayed_node),
36 if (!delayed_node_cache)
41 void __cold btrfs_delayed_inode_exit(void)
43 kmem_cache_destroy(delayed_node_cache);
46 static inline void btrfs_init_delayed_node(
47 struct btrfs_delayed_node *delayed_node,
48 struct btrfs_root *root, u64 inode_id)
50 delayed_node->root = root;
51 delayed_node->inode_id = inode_id;
52 refcount_set(&delayed_node->refs, 0);
53 delayed_node->ins_root = RB_ROOT_CACHED;
54 delayed_node->del_root = RB_ROOT_CACHED;
55 mutex_init(&delayed_node->mutex);
56 INIT_LIST_HEAD(&delayed_node->n_list);
57 INIT_LIST_HEAD(&delayed_node->p_list);
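/*
 * Return the delayed node cached in the inode, or look it up in the root's
 * xarray, taking a reference for the caller. Returns NULL if the inode has
 * no delayed node or if the node found is already being released.
 */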
60 static struct btrfs_delayed_node *btrfs_get_delayed_node(
61 struct btrfs_inode *btrfs_inode)
63 struct btrfs_root *root = btrfs_inode->root;
64 u64 ino = btrfs_ino(btrfs_inode);
65 struct btrfs_delayed_node *node;
67 node = READ_ONCE(btrfs_inode->delayed_node);
69 refcount_inc(&node->refs);
73 spin_lock(&root->inode_lock);
74 node = xa_load(&root->delayed_nodes, ino);
77 if (btrfs_inode->delayed_node) {
78 refcount_inc(&node->refs); /* take a ref for the caller */
79 BUG_ON(btrfs_inode->delayed_node != node);
80 spin_unlock(&root->inode_lock);
85 * It's possible that we're racing into the middle of removing
86 * this node from the xarray. In this case, the refcount
87 * was zero and it should never go back to one. Just return
88 * NULL like it was never in the xarray at all; our release
89 * function is in the process of removing it.
91 * Some implementations of refcount_inc refuse to bump the
92 * refcount once it has hit zero. If we don't do this dance
93 * here, refcount_inc() may decide to just WARN_ONCE() instead
94 * of actually bumping the refcount.
96 * If this node is properly in the xarray, we want to bump the
97 * refcount twice, once for the inode and once for this get
100 if (refcount_inc_not_zero(&node->refs)) {
101 refcount_inc(&node->refs);
102 btrfs_inode->delayed_node = node;
107 spin_unlock(&root->inode_lock);
110 spin_unlock(&root->inode_lock);
115 /* Will return either the node or ERR_PTR(-ENOMEM). */
116 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
117 struct btrfs_inode *btrfs_inode)
119 struct btrfs_delayed_node *node;
120 struct btrfs_root *root = btrfs_inode->root;
121 u64 ino = btrfs_ino(btrfs_inode);
126 node = btrfs_get_delayed_node(btrfs_inode);
130 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
132 return ERR_PTR(-ENOMEM);
133 btrfs_init_delayed_node(node, root, ino);
135 /* Cached in the inode and can be accessed. */
136 refcount_set(&node->refs, 2);
138 /* Allocate and reserve the slot in the xarray; until the node is stored, xa_load() returns NULL for it. */
139 ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
140 if (ret == -ENOMEM) {
141 kmem_cache_free(delayed_node_cache, node);
142 return ERR_PTR(-ENOMEM);
144 spin_lock(&root->inode_lock);
145 ptr = xa_load(&root->delayed_nodes, ino);
147 /* Somebody inserted it, go back and read it. */
148 spin_unlock(&root->inode_lock);
149 kmem_cache_free(delayed_node_cache, node);
153 ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
154 ASSERT(xa_err(ptr) != -EINVAL);
155 ASSERT(xa_err(ptr) != -ENOMEM);
157 btrfs_inode->delayed_node = node;
158 spin_unlock(&root->inode_lock);
164 * Call this with delayed_node->mutex held.
166 * If mod = 1, also (re)queue this node on the prepare list.
168 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
169 struct btrfs_delayed_node *node,
172 spin_lock(&root->lock);
173 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
174 if (!list_empty(&node->p_list))
175 list_move_tail(&node->p_list, &root->prepare_list);
176 else if (mod)
177 list_add_tail(&node->p_list, &root->prepare_list);
179 list_add_tail(&node->n_list, &root->node_list);
180 list_add_tail(&node->p_list, &root->prepare_list);
181 refcount_inc(&node->refs); /* inserted into list */
183 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
185 spin_unlock(&root->lock);
188 /* Call it when holding delayed_node->mutex */
189 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
190 struct btrfs_delayed_node *node)
192 spin_lock(&root->lock);
193 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
195 refcount_dec(&node->refs); /* not in the list */
196 list_del_init(&node->n_list);
197 if (!list_empty(&node->p_list))
198 list_del_init(&node->p_list);
199 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
201 spin_unlock(&root->lock);
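/*
 * Return the first delayed node on the root's node list, with an extra
 * reference held, or NULL if the list is empty.
 */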
204 static struct btrfs_delayed_node *btrfs_first_delayed_node(
205 struct btrfs_delayed_root *delayed_root)
208 struct btrfs_delayed_node *node = NULL;
210 spin_lock(&delayed_root->lock);
211 if (list_empty(&delayed_root->node_list))
214 p = delayed_root->node_list.next;
215 node = list_entry(p, struct btrfs_delayed_node, n_list);
216 refcount_inc(&node->refs);
218 spin_unlock(&delayed_root->lock);
223 static struct btrfs_delayed_node *btrfs_next_delayed_node(
224 struct btrfs_delayed_node *node)
226 struct btrfs_delayed_root *delayed_root;
228 struct btrfs_delayed_node *next = NULL;
230 delayed_root = node->root->fs_info->delayed_root;
231 spin_lock(&delayed_root->lock);
232 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
233 /* not in the list */
234 if (list_empty(&delayed_root->node_list))
236 p = delayed_root->node_list.next;
237 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
240 p = node->n_list.next;
242 next = list_entry(p, struct btrfs_delayed_node, n_list);
243 refcount_inc(&next->refs);
245 spin_unlock(&delayed_root->lock);
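/*
 * Drop one reference on a delayed node. While the node still has pending
 * items it is kept (re)queued on the delayed root's lists; once the last
 * reference is gone it is erased from the root's xarray and freed.
 */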
250 static void __btrfs_release_delayed_node(
251 struct btrfs_delayed_node *delayed_node,
254 struct btrfs_delayed_root *delayed_root;
259 delayed_root = delayed_node->root->fs_info->delayed_root;
261 mutex_lock(&delayed_node->mutex);
262 if (delayed_node->count)
263 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
265 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
266 mutex_unlock(&delayed_node->mutex);
268 if (refcount_dec_and_test(&delayed_node->refs)) {
269 struct btrfs_root *root = delayed_node->root;
271 spin_lock(&root->inode_lock);
273 * Once our refcount goes to zero, nobody is allowed to bump it
274 * back up. We can delete it now.
276 ASSERT(refcount_read(&delayed_node->refs) == 0);
277 xa_erase(&root->delayed_nodes, delayed_node->inode_id);
278 spin_unlock(&root->inode_lock);
279 kmem_cache_free(delayed_node_cache, delayed_node);
283 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
285 __btrfs_release_delayed_node(node, 0);
288 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
289 struct btrfs_delayed_root *delayed_root)
292 struct btrfs_delayed_node *node = NULL;
294 spin_lock(&delayed_root->lock);
295 if (list_empty(&delayed_root->prepare_list))
298 p = delayed_root->prepare_list.next;
300 node = list_entry(p, struct btrfs_delayed_node, p_list);
301 refcount_inc(&node->refs);
303 spin_unlock(&delayed_root->lock);
308 static inline void btrfs_release_prepared_delayed_node(
309 struct btrfs_delayed_node *node)
311 __btrfs_release_delayed_node(node, 1);
314 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
315 struct btrfs_delayed_node *node,
316 enum btrfs_delayed_item_type type)
318 struct btrfs_delayed_item *item;
320 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
322 item->data_len = data_len;
324 item->bytes_reserved = 0;
325 item->delayed_node = node;
326 RB_CLEAR_NODE(&item->rb_node);
327 INIT_LIST_HEAD(&item->log_list);
328 item->logged = false;
329 refcount_set(&item->refs, 1);
335 * Look up the delayed item by key.
337 * @delayed_node: pointer to the delayed node
338 * @index: the dir index value to lookup (offset of a dir index key)
340 * Return the delayed item if we find it, otherwise return NULL.
343 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
344 struct rb_root *root,
347 struct rb_node *node = root->rb_node;
348 struct btrfs_delayed_item *delayed_item = NULL;
351 delayed_item = rb_entry(node, struct btrfs_delayed_item,
353 if (delayed_item->index < index)
354 node = node->rb_right;
355 else if (delayed_item->index > index)
356 node = node->rb_left;
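/*
 * Insert a delayed item into the node's insertion or deletion rbtree,
 * keyed by the dir index. Fails with -EEXIST if an item with the same
 * index is already present.
 */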
364 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
365 struct btrfs_delayed_item *ins)
367 struct rb_node **p, *node;
368 struct rb_node *parent_node = NULL;
369 struct rb_root_cached *root;
370 struct btrfs_delayed_item *item;
371 bool leftmost = true;
373 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
374 root = &delayed_node->ins_root;
376 root = &delayed_node->del_root;
378 p = &root->rb_root.rb_node;
379 node = &ins->rb_node;
383 item = rb_entry(parent_node, struct btrfs_delayed_item,
386 if (item->index < ins->index) {
389 } else if (item->index > ins->index) {
396 rb_link_node(node, parent_node, p);
397 rb_insert_color_cached(node, root, leftmost);
399 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
400 ins->index >= delayed_node->index_cnt)
401 delayed_node->index_cnt = ins->index + 1;
403 delayed_node->count++;
404 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
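/*
 * Account one finished delayed item and wake up any waiter in
 * btrfs_balance_delayed_items() once the backlog drops below the
 * background threshold or a full batch has completed.
 */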
408 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
410 int seq = atomic_inc_return(&delayed_root->items_seq);
412 /* atomic_dec_return implies a barrier */
413 if ((atomic_dec_return(&delayed_root->items) <
414 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
415 cond_wake_up_nomb(&delayed_root->wait);
418 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
420 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
421 struct rb_root_cached *root;
422 struct btrfs_delayed_root *delayed_root;
424 /* Not inserted, ignore it. */
425 if (RB_EMPTY_NODE(&delayed_item->rb_node))
428 /* If it's in an rbtree, the delayed node's mutex must be held. */
429 lockdep_assert_held(&delayed_node->mutex);
431 delayed_root = delayed_node->root->fs_info->delayed_root;
433 BUG_ON(!delayed_root);
435 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
436 root = &delayed_node->ins_root;
438 root = &delayed_node->del_root;
440 rb_erase_cached(&delayed_item->rb_node, root);
441 RB_CLEAR_NODE(&delayed_item->rb_node);
442 delayed_node->count--;
444 finish_one_item(delayed_root);
447 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
450 __btrfs_remove_delayed_item(item);
451 if (refcount_dec_and_test(&item->refs))
456 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
457 struct btrfs_delayed_node *delayed_node)
460 struct btrfs_delayed_item *item = NULL;
462 p = rb_first_cached(&delayed_node->ins_root);
464 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
469 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
470 struct btrfs_delayed_node *delayed_node)
473 struct btrfs_delayed_item *item = NULL;
475 p = rb_first_cached(&delayed_node->del_root);
477 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
482 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
483 struct btrfs_delayed_item *item)
486 struct btrfs_delayed_item *next = NULL;
488 p = rb_next(&item->rb_node);
490 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
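/*
 * Reserve metadata space for a delayed item by migrating it from the
 * transaction's block reserve into the global delayed block reserve.
 * Only deletion items record the reservation in bytes_reserved; insertion
 * items are accounted per leaf through the delayed node instead.
 */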
495 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
496 struct btrfs_delayed_item *item)
498 struct btrfs_block_rsv *src_rsv;
499 struct btrfs_block_rsv *dst_rsv;
500 struct btrfs_fs_info *fs_info = trans->fs_info;
504 if (!trans->bytes_reserved)
507 src_rsv = trans->block_rsv;
508 dst_rsv = &fs_info->delayed_block_rsv;
510 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
513 * Here we migrate space rsv from the transaction rsv, since we have already
514 * reserved space when starting a transaction. So no need to reserve
517 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
519 trace_btrfs_space_reservation(fs_info, "delayed_item",
520 item->delayed_node->inode_id,
523 * For insertions we track reserved metadata space by accounting
524 * for the number of leaves that will be used, based on the delayed
525 * node's curr_index_batch_size and index_item_leaves fields.
527 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
528 item->bytes_reserved = num_bytes;
534 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
535 struct btrfs_delayed_item *item)
537 struct btrfs_block_rsv *rsv;
538 struct btrfs_fs_info *fs_info = root->fs_info;
540 if (!item->bytes_reserved)
543 rsv = &fs_info->delayed_block_rsv;
545 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
546 * to release/reserve qgroup space.
548 trace_btrfs_space_reservation(fs_info, "delayed_item",
549 item->delayed_node->inode_id,
550 item->bytes_reserved, 0);
551 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
554 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
555 unsigned int num_leaves)
557 struct btrfs_fs_info *fs_info = node->root->fs_info;
558 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
560 /* There are no space reservations during log replay, bail out. */
561 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
564 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
566 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
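/*
 * Reserve metadata space for updating the delayed inode item, either by
 * migrating space from the transaction's block reserve or, when the
 * transaction has no reservation, by a fresh NO_FLUSH reservation backed
 * by prealloc qgroup metadata.
 */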
569 static int btrfs_delayed_inode_reserve_metadata(
570 struct btrfs_trans_handle *trans,
571 struct btrfs_root *root,
572 struct btrfs_delayed_node *node)
574 struct btrfs_fs_info *fs_info = root->fs_info;
575 struct btrfs_block_rsv *src_rsv;
576 struct btrfs_block_rsv *dst_rsv;
580 src_rsv = trans->block_rsv;
581 dst_rsv = &fs_info->delayed_block_rsv;
583 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
586 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
587 * which doesn't reserve space for speed. This is a problem since we
588 * still need to reserve space for this update, so try to reserve the space.
591 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
592 * we always reserve enough to update the inode item.
594 if (!src_rsv || (!trans->bytes_reserved &&
595 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
596 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
597 BTRFS_QGROUP_RSV_META_PREALLOC, true);
600 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
601 BTRFS_RESERVE_NO_FLUSH);
602 /* NO_FLUSH could only fail with -ENOSPC */
603 ASSERT(ret == 0 || ret == -ENOSPC);
605 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
607 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
611 trace_btrfs_space_reservation(fs_info, "delayed_inode",
612 node->inode_id, num_bytes, 1);
613 node->bytes_reserved = num_bytes;
619 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
620 struct btrfs_delayed_node *node,
623 struct btrfs_block_rsv *rsv;
625 if (!node->bytes_reserved)
628 rsv = &fs_info->delayed_block_rsv;
629 trace_btrfs_space_reservation(fs_info, "delayed_inode",
630 node->inode_id, node->bytes_reserved, 0);
631 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
633 btrfs_qgroup_free_meta_prealloc(node->root,
634 node->bytes_reserved);
636 btrfs_qgroup_convert_reserved_meta(node->root,
637 node->bytes_reserved);
638 node->bytes_reserved = 0;
642 * Insert a single delayed item or a batch of delayed items, as many as possible
643 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
644 * in the rbtree, and if there's a gap between two consecutive dir index items,
645 * then it means at some point we had delayed dir indexes to add but they got
646 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
647 * into the subvolume tree. Dir index keys also have their offsets coming from a
648 * monotonically increasing counter, so we can't get new keys with an offset that
649 * fits within a gap between delayed dir index items.
651 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
652 struct btrfs_root *root,
653 struct btrfs_path *path,
654 struct btrfs_delayed_item *first_item)
656 struct btrfs_fs_info *fs_info = root->fs_info;
657 struct btrfs_delayed_node *node = first_item->delayed_node;
658 LIST_HEAD(item_list);
659 struct btrfs_delayed_item *curr;
660 struct btrfs_delayed_item *next;
661 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
662 struct btrfs_item_batch batch;
663 struct btrfs_key first_key;
664 const u32 first_data_size = first_item->data_len;
666 char *ins_data = NULL;
668 bool continuous_keys_only = false;
670 lockdep_assert_held(&node->mutex);
673 * During normal operation the delayed index offset is continuously
674 * increasing, so we can batch insert all items as there will not be any
675 * overlapping keys in the tree.
677 * The exception to this is log replay, where we may have interleaved
678 * offsets in the tree, so our batch needs to be continuous keys only in
679 * order to ensure we do not end up with out of order items in our leaf.
681 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
682 continuous_keys_only = true;
685 * For delayed items to insert, we track reserved metadata bytes based
686 * on the number of leaves that we will use.
687 * See btrfs_insert_delayed_dir_index() and
688 * btrfs_delayed_item_reserve_metadata().
690 ASSERT(first_item->bytes_reserved == 0);
692 list_add_tail(&first_item->tree_list, &item_list);
693 batch.total_data_size = first_data_size;
695 total_size = first_data_size + sizeof(struct btrfs_item);
701 next = __btrfs_next_delayed_item(curr);
706 * We cannot allow gaps in the key space if we're doing log replay.
709 if (continuous_keys_only && (next->index != curr->index + 1))
712 ASSERT(next->bytes_reserved == 0);
714 next_size = next->data_len + sizeof(struct btrfs_item);
715 if (total_size + next_size > max_size)
718 list_add_tail(&next->tree_list, &item_list);
720 total_size += next_size;
721 batch.total_data_size += next->data_len;
726 first_key.objectid = node->inode_id;
727 first_key.type = BTRFS_DIR_INDEX_KEY;
728 first_key.offset = first_item->index;
729 batch.keys = &first_key;
730 batch.data_sizes = &first_data_size;
732 struct btrfs_key *ins_keys;
736 ins_data = kmalloc(batch.nr * sizeof(u32) +
737 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
742 ins_sizes = (u32 *)ins_data;
743 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
744 batch.keys = ins_keys;
745 batch.data_sizes = ins_sizes;
746 list_for_each_entry(curr, &item_list, tree_list) {
747 ins_keys[i].objectid = node->inode_id;
748 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
749 ins_keys[i].offset = curr->index;
750 ins_sizes[i] = curr->data_len;
755 ret = btrfs_insert_empty_items(trans, root, path, &batch);
759 list_for_each_entry(curr, &item_list, tree_list) {
762 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
763 write_extent_buffer(path->nodes[0], &curr->data,
764 (unsigned long)data_ptr, curr->data_len);
769 * Now release our path before releasing the delayed items and their
770 * metadata reservations, so that we don't block other tasks for more
773 btrfs_release_path(path);
775 ASSERT(node->index_item_leaves > 0);
778 * For normal operations we will batch an entire leaf's worth of delayed
779 * items, so if there are more items to process we can decrement
780 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
782 * However for log replay we may not have inserted an entire leaf's
783 * worth of items, we may not have had continuous items, so decrementing
784 * here would mess up the index_item_leaves accounting. For this case
785 * only clean up the accounting when there are no items left.
787 if (next && !continuous_keys_only) {
789 * We inserted one batch of items into a leaf and there are more
790 * items to flush in a future batch, now release one unit of
791 * metadata space from the delayed block reserve, corresponding
792 * to the leaf we just flushed.
794 btrfs_delayed_item_release_leaves(node, 1);
795 node->index_item_leaves--;
798 * There are no more items to insert. We can have a number of
799 * reserved leaves > 1 here - this happens when many dir index
800 * items are added and then removed before they are flushed (file
801 * names with a very short life that never span a transaction). So
802 * release all remaining leaves.
804 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
805 node->index_item_leaves = 0;
808 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
809 list_del(&curr->tree_list);
810 btrfs_release_delayed_item(curr);
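/*
 * Flush all pending insertion items of a delayed node into the subvolume
 * tree, one leaf-sized batch at a time, taking and dropping the node's
 * mutex between batches.
 */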
817 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
818 struct btrfs_path *path,
819 struct btrfs_root *root,
820 struct btrfs_delayed_node *node)
825 struct btrfs_delayed_item *curr;
827 mutex_lock(&node->mutex);
828 curr = __btrfs_first_delayed_insertion_item(node);
830 mutex_unlock(&node->mutex);
833 ret = btrfs_insert_delayed_item(trans, root, path, curr);
834 mutex_unlock(&node->mutex);
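/*
 * Starting from the leaf slot the path points at, delete a contiguous run
 * of dir index items that match consecutive delayed deletion items, then
 * release the metadata reserved for them and drop the processed items.
 */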
840 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
841 struct btrfs_root *root,
842 struct btrfs_path *path,
843 struct btrfs_delayed_item *item)
845 const u64 ino = item->delayed_node->inode_id;
846 struct btrfs_fs_info *fs_info = root->fs_info;
847 struct btrfs_delayed_item *curr, *next;
848 struct extent_buffer *leaf = path->nodes[0];
849 LIST_HEAD(batch_list);
850 int nitems, slot, last_slot;
852 u64 total_reserved_size = item->bytes_reserved;
854 ASSERT(leaf != NULL);
856 slot = path->slots[0];
857 last_slot = btrfs_header_nritems(leaf) - 1;
859 * Our caller always gives us a path pointing to an existing item, so
860 * this can not happen.
862 ASSERT(slot <= last_slot);
863 if (WARN_ON(slot > last_slot))
868 list_add_tail(&curr->tree_list, &batch_list);
871 * Keep checking if the next delayed item matches the next item in the
872 * leaf - if so, we can add it to the batch of items to delete from the
875 while (slot < last_slot) {
876 struct btrfs_key key;
878 next = __btrfs_next_delayed_item(curr);
883 btrfs_item_key_to_cpu(leaf, &key, slot);
884 if (key.objectid != ino ||
885 key.type != BTRFS_DIR_INDEX_KEY ||
886 key.offset != next->index)
890 list_add_tail(&curr->tree_list, &batch_list);
891 total_reserved_size += curr->bytes_reserved;
894 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
898 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
899 if (total_reserved_size > 0) {
901 * Check btrfs_delayed_item_reserve_metadata() to see why we
902 * don't need to release/reserve qgroup space.
904 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
905 total_reserved_size, 0);
906 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
907 total_reserved_size, NULL);
910 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
911 list_del(&curr->tree_list);
912 btrfs_release_delayed_item(curr);
918 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
919 struct btrfs_path *path,
920 struct btrfs_root *root,
921 struct btrfs_delayed_node *node)
923 struct btrfs_key key;
926 key.objectid = node->inode_id;
927 key.type = BTRFS_DIR_INDEX_KEY;
930 struct btrfs_delayed_item *item;
932 mutex_lock(&node->mutex);
933 item = __btrfs_first_delayed_deletion_item(node);
935 mutex_unlock(&node->mutex);
939 key.offset = item->index;
940 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
943 * There's no matching item in the leaf. This means we
944 * have already deleted this item in a past run of the
945 * delayed items. We ignore errors when running delayed
946 * items from an async context, through a work queue job
947 * running btrfs_async_run_delayed_root(), and don't
948 * release delayed items that failed to complete. This
949 * is because we will retry later, and at transaction
950 * commit time we always run delayed items and will
951 * then deal with errors if they fail to run again.
953 * So just release delayed items for which we can't find
954 * an item in the tree, and move to the next item.
956 btrfs_release_path(path);
957 btrfs_release_delayed_item(item);
959 } else if (ret == 0) {
960 ret = btrfs_batch_delete_items(trans, root, path, item);
961 btrfs_release_path(path);
965 * We unlock and relock on each iteration to avoid blocking
966 * other tasks for too long while we are being run from
967 * the async context (work queue job). Those tasks are typically
968 * running system calls like creat/mkdir/rename/unlink/etc which
969 * need to add delayed items to this delayed node.
971 mutex_unlock(&node->mutex);
977 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
979 struct btrfs_delayed_root *delayed_root;
982 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
983 BUG_ON(!delayed_node->root);
984 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
985 delayed_node->count--;
987 delayed_root = delayed_node->root->fs_info->delayed_root;
988 finish_one_item(delayed_root);
992 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
995 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
996 struct btrfs_delayed_root *delayed_root;
998 ASSERT(delayed_node->root);
999 delayed_node->count--;
1001 delayed_root = delayed_node->root->fs_info->delayed_root;
1002 finish_one_item(delayed_root);
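/*
 * Copy the delayed node's in-memory inode item into the INODE_ITEM in the
 * subvolume tree and, if requested, delete the pending INODE_REF/EXTREF.
 * The transaction is aborted on any error other than -ENOENT.
 */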
1006 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1007 struct btrfs_root *root,
1008 struct btrfs_path *path,
1009 struct btrfs_delayed_node *node)
1011 struct btrfs_fs_info *fs_info = root->fs_info;
1012 struct btrfs_key key;
1013 struct btrfs_inode_item *inode_item;
1014 struct extent_buffer *leaf;
1018 key.objectid = node->inode_id;
1019 key.type = BTRFS_INODE_ITEM_KEY;
1022 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1027 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1033 leaf = path->nodes[0];
1034 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1035 struct btrfs_inode_item);
1036 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1037 sizeof(struct btrfs_inode_item));
1038 btrfs_mark_buffer_dirty(trans, leaf);
1040 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1044 * Now we're going to delete the INODE_REF/EXTREF, which should be the
1045 * only one ref left. Check if the next item is an INODE_REF/EXTREF.
1047 * But if we're the last item already, release and search for the last INODE_REF/EXTREF.
1050 if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1051 key.objectid = node->inode_id;
1052 key.type = BTRFS_INODE_EXTREF_KEY;
1053 key.offset = (u64)-1;
1055 btrfs_release_path(path);
1056 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1060 ASSERT(path->slots[0] > 0);
1063 leaf = path->nodes[0];
1067 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1068 if (key.objectid != node->inode_id)
1070 if (key.type != BTRFS_INODE_REF_KEY &&
1071 key.type != BTRFS_INODE_EXTREF_KEY)
1075 * Delayed iref deletion is for the inode who has only one link,
1076 * so there is only one iref. The case that several irefs are
1077 * in the same item doesn't exist.
1079 ret = btrfs_del_item(trans, root, path);
1081 btrfs_release_delayed_iref(node);
1082 btrfs_release_path(path);
1084 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1085 btrfs_release_delayed_inode(node);
1088 * If we fail to update the delayed inode we need to abort the
1089 * transaction, because we could leave the inode with the improper counts behind.
1092 if (ret && ret != -ENOENT)
1093 btrfs_abort_transaction(trans, ret);
1098 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1099 struct btrfs_root *root,
1100 struct btrfs_path *path,
1101 struct btrfs_delayed_node *node)
1105 mutex_lock(&node->mutex);
1106 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1107 mutex_unlock(&node->mutex);
1111 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1112 mutex_unlock(&node->mutex);
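/*
 * Run all pending work for one delayed node: insert the delayed insertion
 * items, process the delayed deletions, record the root in the transaction
 * and finally update the inode item if it is dirty.
 */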
1117 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1118 struct btrfs_path *path,
1119 struct btrfs_delayed_node *node)
1123 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1127 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1131 ret = btrfs_record_root_in_trans(trans, node->root);
1134 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1139 * Called when committing the transaction.
1140 * Returns 0 on success.
1141 * Returns < 0 on error and returns with an aborted transaction with any
1142 * outstanding delayed items cleaned up.
1144 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1146 struct btrfs_fs_info *fs_info = trans->fs_info;
1147 struct btrfs_delayed_root *delayed_root;
1148 struct btrfs_delayed_node *curr_node, *prev_node;
1149 struct btrfs_path *path;
1150 struct btrfs_block_rsv *block_rsv;
1152 bool count = (nr > 0);
1154 if (TRANS_ABORTED(trans))
1157 path = btrfs_alloc_path();
1161 block_rsv = trans->block_rsv;
1162 trans->block_rsv = &fs_info->delayed_block_rsv;
1164 delayed_root = fs_info->delayed_root;
1166 curr_node = btrfs_first_delayed_node(delayed_root);
1167 while (curr_node && (!count || nr--)) {
1168 ret = __btrfs_commit_inode_delayed_items(trans, path,
1171 btrfs_abort_transaction(trans, ret);
1175 prev_node = curr_node;
1176 curr_node = btrfs_next_delayed_node(curr_node);
1178 * See the comment below about releasing path before releasing
1179 * node. If the commit of delayed items was successful the path
1180 * should always be released, but in case of an error, it may
1181 * point to locked extent buffers (a leaf at the very least).
1183 ASSERT(path->nodes[0] == NULL);
1184 btrfs_release_delayed_node(prev_node);
1188 * Release the path to avoid a potential deadlock and lockdep splat when
1189 * releasing the delayed node, as that requires taking the delayed node's
1190 * mutex. If another task starts running delayed items before we take
1191 * the mutex, it will first lock the mutex and then it may try to lock
1192 * the same btree path (leaf).
1194 btrfs_free_path(path);
1197 btrfs_release_delayed_node(curr_node);
1198 trans->block_rsv = block_rsv;
1203 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1205 return __btrfs_run_delayed_items(trans, -1);
1208 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1210 return __btrfs_run_delayed_items(trans, nr);
1213 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1214 struct btrfs_inode *inode)
1216 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1217 struct btrfs_path *path;
1218 struct btrfs_block_rsv *block_rsv;
1224 mutex_lock(&delayed_node->mutex);
1225 if (!delayed_node->count) {
1226 mutex_unlock(&delayed_node->mutex);
1227 btrfs_release_delayed_node(delayed_node);
1230 mutex_unlock(&delayed_node->mutex);
1232 path = btrfs_alloc_path();
1234 btrfs_release_delayed_node(delayed_node);
1238 block_rsv = trans->block_rsv;
1239 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1241 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1243 btrfs_release_delayed_node(delayed_node);
1244 btrfs_free_path(path);
1245 trans->block_rsv = block_rsv;
1250 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1252 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1253 struct btrfs_trans_handle *trans;
1254 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1255 struct btrfs_path *path;
1256 struct btrfs_block_rsv *block_rsv;
1262 mutex_lock(&delayed_node->mutex);
1263 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1264 mutex_unlock(&delayed_node->mutex);
1265 btrfs_release_delayed_node(delayed_node);
1268 mutex_unlock(&delayed_node->mutex);
1270 trans = btrfs_join_transaction(delayed_node->root);
1271 if (IS_ERR(trans)) {
1272 ret = PTR_ERR(trans);
1276 path = btrfs_alloc_path();
1282 block_rsv = trans->block_rsv;
1283 trans->block_rsv = &fs_info->delayed_block_rsv;
1285 mutex_lock(&delayed_node->mutex);
1286 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1287 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1288 path, delayed_node);
1291 mutex_unlock(&delayed_node->mutex);
1293 btrfs_free_path(path);
1294 trans->block_rsv = block_rsv;
1296 btrfs_end_transaction(trans);
1297 btrfs_btree_balance_dirty(fs_info);
1299 btrfs_release_delayed_node(delayed_node);
1304 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1306 struct btrfs_delayed_node *delayed_node;
1308 delayed_node = READ_ONCE(inode->delayed_node);
1312 inode->delayed_node = NULL;
1313 btrfs_release_delayed_node(delayed_node);
1316 struct btrfs_async_delayed_work {
1317 struct btrfs_delayed_root *delayed_root;
1319 struct btrfs_work work;
1322 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1324 struct btrfs_async_delayed_work *async_work;
1325 struct btrfs_delayed_root *delayed_root;
1326 struct btrfs_trans_handle *trans;
1327 struct btrfs_path *path;
1328 struct btrfs_delayed_node *delayed_node = NULL;
1329 struct btrfs_root *root;
1330 struct btrfs_block_rsv *block_rsv;
1333 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1334 delayed_root = async_work->delayed_root;
1336 path = btrfs_alloc_path();
1341 if (atomic_read(&delayed_root->items) <
1342 BTRFS_DELAYED_BACKGROUND / 2)
1345 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1349 root = delayed_node->root;
1351 trans = btrfs_join_transaction(root);
1352 if (IS_ERR(trans)) {
1353 btrfs_release_path(path);
1354 btrfs_release_prepared_delayed_node(delayed_node);
1359 block_rsv = trans->block_rsv;
1360 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1362 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1364 trans->block_rsv = block_rsv;
1365 btrfs_end_transaction(trans);
1366 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1368 btrfs_release_path(path);
1369 btrfs_release_prepared_delayed_node(delayed_node);
1372 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1373 || total_done < async_work->nr);
1375 btrfs_free_path(path);
1377 wake_up(&delayed_root->wait);
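/*
 * Queue an async work item that flushes prepared delayed nodes in the
 * background via btrfs_async_run_delayed_root(). With nr == 0 the worker
 * keeps going until BTRFS_DELAYED_WRITEBACK nodes were processed or the
 * item backlog drops below half of BTRFS_DELAYED_BACKGROUND.
 */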
1382 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1383 struct btrfs_fs_info *fs_info, int nr)
1385 struct btrfs_async_delayed_work *async_work;
1387 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1391 async_work->delayed_root = delayed_root;
1392 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1393 async_work->nr = nr;
1395 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1399 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1401 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1404 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1406 int val = atomic_read(&delayed_root->items_seq);
1408 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1411 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1417 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1419 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1421 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1422 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1425 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1429 seq = atomic_read(&delayed_root->items_seq);
1431 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1435 wait_event_interruptible(delayed_root->wait,
1436 could_end_wait(delayed_root, seq));
1440 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1443 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1445 struct btrfs_fs_info *fs_info = trans->fs_info;
1446 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1448 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1452 * Adding the new dir index item does not require touching another
1453 * leaf, so we can release 1 unit of metadata that was previously
1454 * reserved when starting the transaction. This applies only to
1455 * the case where we had a transaction start and excludes the
1456 * transaction join case (when replaying log trees).
1458 trace_btrfs_space_reservation(fs_info, "transaction",
1459 trans->transid, bytes, 0);
1460 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1461 ASSERT(trans->bytes_reserved >= bytes);
1462 trans->bytes_reserved -= bytes;
1465 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1466 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1467 const char *name, int name_len,
1468 struct btrfs_inode *dir,
1469 struct btrfs_disk_key *disk_key, u8 flags,
1472 struct btrfs_fs_info *fs_info = trans->fs_info;
1473 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1474 struct btrfs_delayed_node *delayed_node;
1475 struct btrfs_delayed_item *delayed_item;
1476 struct btrfs_dir_item *dir_item;
1477 bool reserve_leaf_space;
1481 delayed_node = btrfs_get_or_create_delayed_node(dir);
1482 if (IS_ERR(delayed_node))
1483 return PTR_ERR(delayed_node);
1485 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1487 BTRFS_DELAYED_INSERTION_ITEM);
1488 if (!delayed_item) {
1493 delayed_item->index = index;
1495 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1496 dir_item->location = *disk_key;
1497 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1498 btrfs_set_stack_dir_data_len(dir_item, 0);
1499 btrfs_set_stack_dir_name_len(dir_item, name_len);
1500 btrfs_set_stack_dir_flags(dir_item, flags);
1501 memcpy((char *)(dir_item + 1), name, name_len);
1503 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1505 mutex_lock(&delayed_node->mutex);
1508 * First attempt to insert the delayed item. This is to make the error
1509 * handling path simpler in case we fail (-EEXIST). There's no risk of
1510 * any other task coming in and running the delayed item before we do
1511 * the metadata space reservation below, because we are holding the
1512 * delayed node's mutex and that mutex must also be locked before the
1513 * node's delayed items can be run.
1515 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1516 if (unlikely(ret)) {
1517 btrfs_err(trans->fs_info,
1518 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1519 name_len, name, index, btrfs_root_id(delayed_node->root),
1520 delayed_node->inode_id, dir->index_cnt,
1521 delayed_node->index_cnt, ret);
1522 btrfs_release_delayed_item(delayed_item);
1523 btrfs_release_dir_index_item_space(trans);
1524 mutex_unlock(&delayed_node->mutex);
1528 if (delayed_node->index_item_leaves == 0 ||
1529 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1530 delayed_node->curr_index_batch_size = data_len;
1531 reserve_leaf_space = true;
1533 delayed_node->curr_index_batch_size += data_len;
1534 reserve_leaf_space = false;
1537 if (reserve_leaf_space) {
1538 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1540 * Space was reserved for a dir index item insertion when we
1541 * started the transaction, so getting a failure here should be impossible.
1545 btrfs_release_delayed_item(delayed_item);
1546 mutex_unlock(&delayed_node->mutex);
1550 delayed_node->index_item_leaves++;
1552 btrfs_release_dir_index_item_space(trans);
1554 mutex_unlock(&delayed_node->mutex);
1557 btrfs_release_delayed_node(delayed_node);
1561 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1562 struct btrfs_delayed_node *node,
1565 struct btrfs_delayed_item *item;
1567 mutex_lock(&node->mutex);
1568 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1570 mutex_unlock(&node->mutex);
1575 * For delayed items to insert, we track reserved metadata bytes based
1576 * on the number of leaves that we will use.
1577 * See btrfs_insert_delayed_dir_index() and
1578 * btrfs_delayed_item_reserve_metadata().
1580 ASSERT(item->bytes_reserved == 0);
1581 ASSERT(node->index_item_leaves > 0);
1584 * If there's only one leaf reserved, we can decrement this item from the
1585 * current batch, otherwise we can not because we don't know which leaf
1586 * it belongs to. With the current limit on delayed items, we rarely
1587 * accumulate enough dir index items to fill more than one leaf (even
1588 * when using a leaf size of 4K).
1590 if (node->index_item_leaves == 1) {
1591 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1593 ASSERT(node->curr_index_batch_size >= data_len);
1594 node->curr_index_batch_size -= data_len;
1597 btrfs_release_delayed_item(item);
1599 /* If we now have no more dir index items, we can release all leaves. */
1600 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1601 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1602 node->index_item_leaves = 0;
1605 mutex_unlock(&node->mutex);
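/*
 * Record the deletion of a dir index key. If a matching insertion item is
 * still pending it is simply cancelled (see the helper above); otherwise a
 * delayed deletion item is reserved and queued on the node's deletion tree.
 */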
1609 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1610 struct btrfs_inode *dir, u64 index)
1612 struct btrfs_delayed_node *node;
1613 struct btrfs_delayed_item *item;
1616 node = btrfs_get_or_create_delayed_node(dir);
1618 return PTR_ERR(node);
1620 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1624 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1630 item->index = index;
1632 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1634 * We have reserved enough space when we started a new transaction,
1635 * so a metadata reservation failure here should be impossible.
1638 btrfs_err(trans->fs_info,
1639 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1640 btrfs_release_delayed_item(item);
1644 mutex_lock(&node->mutex);
1645 ret = __btrfs_add_delayed_item(node, item);
1646 if (unlikely(ret)) {
1647 btrfs_err(trans->fs_info,
1648 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1649 index, node->root->root_key.objectid,
1650 node->inode_id, ret);
1651 btrfs_delayed_item_release_metadata(dir->root, item);
1652 btrfs_release_delayed_item(item);
1654 mutex_unlock(&node->mutex);
1656 btrfs_release_delayed_node(node);
1660 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1662 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1668 * Since we hold the i_mutex of this directory, no new directory index
1669 * can be added to the delayed node and index_cnt cannot be updated now.
1670 * So we don't need to lock the delayed node.
1672 if (!delayed_node->index_cnt) {
1673 btrfs_release_delayed_node(delayed_node);
1677 inode->index_cnt = delayed_node->index_cnt;
1678 btrfs_release_delayed_node(delayed_node);
1682 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1684 struct list_head *ins_list,
1685 struct list_head *del_list)
1687 struct btrfs_delayed_node *delayed_node;
1688 struct btrfs_delayed_item *item;
1690 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1695 * We can only do one readdir with delayed items at a time because of
1696 * item->readdir_list.
1698 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
1699 btrfs_inode_lock(BTRFS_I(inode), 0);
1701 mutex_lock(&delayed_node->mutex);
1702 item = __btrfs_first_delayed_insertion_item(delayed_node);
1703 while (item && item->index <= last_index) {
1704 refcount_inc(&item->refs);
1705 list_add_tail(&item->readdir_list, ins_list);
1706 item = __btrfs_next_delayed_item(item);
1709 item = __btrfs_first_delayed_deletion_item(delayed_node);
1710 while (item && item->index <= last_index) {
1711 refcount_inc(&item->refs);
1712 list_add_tail(&item->readdir_list, del_list);
1713 item = __btrfs_next_delayed_item(item);
1715 mutex_unlock(&delayed_node->mutex);
1717 * This delayed node is still cached in the btrfs inode, so refs
1718 * must be > 1 now, and we needn't check whether it is going to be freed or not.
1721 * Besides that, this function is used to read the directory, and we do not
1722 * insert/delete delayed items during this period. So we also needn't
1723 * requeue or dequeue this delayed node.
1725 refcount_dec(&delayed_node->refs);
1730 void btrfs_readdir_put_delayed_items(struct inode *inode,
1731 struct list_head *ins_list,
1732 struct list_head *del_list)
1734 struct btrfs_delayed_item *curr, *next;
1736 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1737 list_del(&curr->readdir_list);
1738 if (refcount_dec_and_test(&curr->refs))
1742 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1743 list_del(&curr->readdir_list);
1744 if (refcount_dec_and_test(&curr->refs))
1749 * The VFS is going to do up_read(), so we need to downgrade back to a read lock.
1752 downgrade_write(&inode->i_rwsem);
1755 int btrfs_should_delete_dir_index(struct list_head *del_list,
1758 struct btrfs_delayed_item *curr;
1761 list_for_each_entry(curr, del_list, readdir_list) {
1762 if (curr->index > index)
1764 if (curr->index == index) {
1773 * Read dir info stored in the delayed tree.
1775 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1776 struct list_head *ins_list)
1778 struct btrfs_dir_item *di;
1779 struct btrfs_delayed_item *curr, *next;
1780 struct btrfs_key location;
1784 unsigned char d_type;
1787 * The data of a delayed item cannot change, so we don't need to
1788 * lock it. And since we hold the i_mutex of the directory, nobody
1789 * can delete any directory index now.
1791 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1792 list_del(&curr->readdir_list);
1794 if (curr->index < ctx->pos) {
1795 if (refcount_dec_and_test(&curr->refs))
1800 ctx->pos = curr->index;
1802 di = (struct btrfs_dir_item *)curr->data;
1803 name = (char *)(di + 1);
1804 name_len = btrfs_stack_dir_name_len(di);
1806 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1807 btrfs_disk_key_to_cpu(&location, &di->location);
1809 over = !dir_emit(ctx, name, name_len,
1810 location.objectid, d_type);
1812 if (refcount_dec_and_test(&curr->refs))
1822 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1823 struct btrfs_inode_item *inode_item,
1824 struct inode *inode)
1828 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1829 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1830 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1831 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1832 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1833 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1834 btrfs_set_stack_inode_generation(inode_item,
1835 BTRFS_I(inode)->generation);
1836 btrfs_set_stack_inode_sequence(inode_item,
1837 inode_peek_iversion(inode));
1838 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1839 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1840 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1841 BTRFS_I(inode)->ro_flags);
1842 btrfs_set_stack_inode_flags(inode_item, flags);
1843 btrfs_set_stack_inode_block_group(inode_item, 0);
1845 btrfs_set_stack_timespec_sec(&inode_item->atime,
1846 inode_get_atime_sec(inode));
1847 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1848 inode_get_atime_nsec(inode));
1850 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1851 inode_get_mtime_sec(inode));
1852 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1853 inode_get_mtime_nsec(inode));
1855 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1856 inode_get_ctime_sec(inode));
1857 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1858 inode_get_ctime_nsec(inode));
1860 btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1861 btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
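/*
 * Fill the VFS inode from the inode item cached in the delayed node, if one
 * exists and its inode item is dirty. If not, the caller has to read the
 * inode item from the subvolume tree instead.
 */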
1864 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1866 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1867 struct btrfs_delayed_node *delayed_node;
1868 struct btrfs_inode_item *inode_item;
1870 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1874 mutex_lock(&delayed_node->mutex);
1875 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1876 mutex_unlock(&delayed_node->mutex);
1877 btrfs_release_delayed_node(delayed_node);
1881 inode_item = &delayed_node->inode_item;
1883 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1884 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1885 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1886 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1887 round_up(i_size_read(inode), fs_info->sectorsize));
1888 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1889 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1890 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1891 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1892 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1894 inode_set_iversion_queried(inode,
1895 btrfs_stack_inode_sequence(inode_item));
1897 *rdev = btrfs_stack_inode_rdev(inode_item);
1898 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1899 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1901 inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1902 btrfs_stack_timespec_nsec(&inode_item->atime));
1904 inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1905 btrfs_stack_timespec_nsec(&inode_item->mtime));
1907 inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1908 btrfs_stack_timespec_nsec(&inode_item->ctime));
1910 BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1911 BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1913 inode->i_generation = BTRFS_I(inode)->generation;
1914 BTRFS_I(inode)->index_cnt = (u64)-1;
1916 mutex_unlock(&delayed_node->mutex);
1917 btrfs_release_delayed_node(delayed_node);
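/*
 * Capture the current VFS inode state into the delayed node and mark its
 * inode item dirty, reserving metadata the first time, so the on-disk
 * inode item gets written out when the delayed items are run.
 */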
1921 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1922 struct btrfs_inode *inode)
1924 struct btrfs_root *root = inode->root;
1925 struct btrfs_delayed_node *delayed_node;
1928 delayed_node = btrfs_get_or_create_delayed_node(inode);
1929 if (IS_ERR(delayed_node))
1930 return PTR_ERR(delayed_node);
1932 mutex_lock(&delayed_node->mutex);
1933 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1934 fill_stack_inode_item(trans, &delayed_node->inode_item,
1939 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1943 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1944 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1945 delayed_node->count++;
1946 atomic_inc(&root->fs_info->delayed_root->items);
1948 mutex_unlock(&delayed_node->mutex);
1949 btrfs_release_delayed_node(delayed_node);
1953 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1955 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1956 struct btrfs_delayed_node *delayed_node;
1959 * we don't do delayed inode updates during log recovery because it
1960 * leads to enospc problems. This means we also can't do
1961 * delayed inode refs.
1963 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1966 delayed_node = btrfs_get_or_create_delayed_node(inode);
1967 if (IS_ERR(delayed_node))
1968 return PTR_ERR(delayed_node);
1971 * We don't reserve space for inode ref deletion because:
1972 * - We ONLY do async inode ref deletion for an inode that has only
1973 * one link (i_nlink == 1), which means there is only one inode ref.
1974 * And in most cases, the inode ref and the inode item are in the
1975 * same leaf, and we will deal with them at the same time.
1976 * Since we are sure we will reserve the space for the inode item,
1977 * it is unnecessary to reserve space for inode ref deletion.
1978 * - If the inode ref and the inode item are not in the same leaf,
1979 * we also needn't worry about an enospc problem, because we reserve
1980 * much more space for the inode update than it needs.
1981 * - At worst, we can steal some space from the global reservation.
1984 mutex_lock(&delayed_node->mutex);
1985 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1988 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1989 delayed_node->count++;
1990 atomic_inc(&fs_info->delayed_root->items);
1992 mutex_unlock(&delayed_node->mutex);
1993 btrfs_release_delayed_node(delayed_node);
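/*
 * Throw away everything a delayed node still holds: pending insertion and
 * deletion items, reserved leaves and metadata, a pending iref deletion and
 * a dirty inode item. Used when an inode or a whole root is being torn down.
 */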
1997 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1999 struct btrfs_root *root = delayed_node->root;
2000 struct btrfs_fs_info *fs_info = root->fs_info;
2001 struct btrfs_delayed_item *curr_item, *prev_item;
2003 mutex_lock(&delayed_node->mutex);
2004 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2006 prev_item = curr_item;
2007 curr_item = __btrfs_next_delayed_item(prev_item);
2008 btrfs_release_delayed_item(prev_item);
2011 if (delayed_node->index_item_leaves > 0) {
2012 btrfs_delayed_item_release_leaves(delayed_node,
2013 delayed_node->index_item_leaves);
2014 delayed_node->index_item_leaves = 0;
2017 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2019 btrfs_delayed_item_release_metadata(root, curr_item);
2020 prev_item = curr_item;
2021 curr_item = __btrfs_next_delayed_item(prev_item);
2022 btrfs_release_delayed_item(prev_item);
2025 btrfs_release_delayed_iref(delayed_node);
2027 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2028 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2029 btrfs_release_delayed_inode(delayed_node);
2031 mutex_unlock(&delayed_node->mutex);
2034 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2036 struct btrfs_delayed_node *delayed_node;
2038 delayed_node = btrfs_get_delayed_node(inode);
2042 __btrfs_kill_delayed_node(delayed_node);
2043 btrfs_release_delayed_node(delayed_node);
2046 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2048 unsigned long index = 0;
2049 struct btrfs_delayed_node *delayed_nodes[8];
2052 struct btrfs_delayed_node *node;
2055 spin_lock(&root->inode_lock);
2056 if (xa_empty(&root->delayed_nodes)) {
2057 spin_unlock(&root->inode_lock);
2062 xa_for_each_start(&root->delayed_nodes, index, node, index) {
2064 * Don't increase refs in case the node is dead and
2065 * about to be removed from the tree in the loop below
2067 if (refcount_inc_not_zero(&node->refs)) {
2068 delayed_nodes[count] = node;
2071 if (count >= ARRAY_SIZE(delayed_nodes))
2074 spin_unlock(&root->inode_lock);
2077 for (int i = 0; i < count; i++) {
2078 __btrfs_kill_delayed_node(delayed_nodes[i]);
2079 btrfs_release_delayed_node(delayed_nodes[i]);
2084 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2086 struct btrfs_delayed_node *curr_node, *prev_node;
2088 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2090 __btrfs_kill_delayed_node(curr_node);
2092 prev_node = curr_node;
2093 curr_node = btrfs_next_delayed_node(curr_node);
2094 btrfs_release_delayed_node(prev_node);
2098 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2099 struct list_head *ins_list,
2100 struct list_head *del_list)
2102 struct btrfs_delayed_node *node;
2103 struct btrfs_delayed_item *item;
2105 node = btrfs_get_delayed_node(inode);
2109 mutex_lock(&node->mutex);
2110 item = __btrfs_first_delayed_insertion_item(node);
2113 * It's possible that the item is already in a log list. This
2114 * can happen in case two tasks are trying to log the same
2115 * directory. For example if we have tasks A and task B:
2117 * Task A collected the delayed items into a log list while
2118 * under the inode's log_mutex (at btrfs_log_inode()), but it
2119 * only releases the items after logging the inodes they point
2120 * to (if they are new inodes), which happens after unlocking
2123 * Task B enters btrfs_log_inode() and acquires the log_mutex
2124 * of the same directory inode, before task B releases the
2125 * delayed items. This can happen for example when logging some
2126 * inode we need to trigger logging of its parent directory, so
2127 * logging two files that have the same parent directory can
2130 * If this happens, just ignore delayed items already in a log
2131 * list. All the tasks logging the directory are under a log
2132 * transaction and whichever finishes first can not sync the log
2133 * before the other completes and leaves the log transaction.
2135 if (!item->logged && list_empty(&item->log_list)) {
2136 refcount_inc(&item->refs);
2137 list_add_tail(&item->log_list, ins_list);
2139 item = __btrfs_next_delayed_item(item);
2142 item = __btrfs_first_delayed_deletion_item(node);
2144 /* It may be non-empty, for the same reason mentioned above. */
2145 if (!item->logged && list_empty(&item->log_list)) {
2146 refcount_inc(&item->refs);
2147 list_add_tail(&item->log_list, del_list);
2149 item = __btrfs_next_delayed_item(item);
2151 mutex_unlock(&node->mutex);
2154 * We are called during inode logging, which means the inode is in use
2155 * and can not be evicted before we finish logging the inode. So we never
2156 * have the last reference on the delayed inode.
2157 * Also, we don't use btrfs_release_delayed_node() because that would
2158 * requeue the delayed inode (change its order in the list of prepared
2159 * nodes) and we don't want to do such change because we don't create or
2160 * delete delayed items.
2162 ASSERT(refcount_read(&node->refs) > 1);
2163 refcount_dec(&node->refs);
2166 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2167 struct list_head *ins_list,
2168 struct list_head *del_list)
2170 struct btrfs_delayed_node *node;
2171 struct btrfs_delayed_item *item;
2172 struct btrfs_delayed_item *next;
2174 node = btrfs_get_delayed_node(inode);
2178 mutex_lock(&node->mutex);
2180 list_for_each_entry_safe(item, next, ins_list, log_list) {
2181 item->logged = true;
2182 list_del_init(&item->log_list);
2183 if (refcount_dec_and_test(&item->refs))
2187 list_for_each_entry_safe(item, next, del_list, log_list) {
2188 item->logged = true;
2189 list_del_init(&item->log_list);
2190 if (refcount_dec_and_test(&item->refs))
2194 mutex_unlock(&node->mutex);
2197 * We are called during inode logging, which means the inode is in use
2198 * and can not be evicted before we finish logging the inode. So we never
2199 * have the last reference on the delayed inode.
2200 * Also, we don't use btrfs_release_delayed_node() because that would
2201 * requeue the delayed inode (change its order in the list of prepared
2202 * nodes) and we don't want to do such change because we don't create or
2203 * delete delayed items.
2205 ASSERT(refcount_read(&node->refs) > 1);
2206 refcount_dec(&node->refs);