// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
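
/*
 * Look up the delayed node cached for an inode and take a reference on it.
 * The fast path reads the pointer cached in the btrfs inode; otherwise the
 * node is looked up in the root's radix tree, guarding against racing with
 * a concurrent release that has already dropped the last reference.
 */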
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
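
/*
 * Grab the first delayed node on the root's node list, with a reference
 * held, or return NULL if the list is empty.
 */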
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
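
/*
 * Allocate a delayed item with @data_len bytes of trailing storage for the
 * item body, initialized with a single reference held.
 */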
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}
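
/*
 * Link a delayed item into the node's insertion or deletion rbtree, keyed
 * by the item key.  Returns -EEXIST if an item with the same key is already
 * queued.  For queued dir index insertions, index_cnt is pushed past the
 * new index so later allocations don't reuse it.
 */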
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
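
/*
 * Account one finished item: bump items_seq and wake up any waiter in
 * btrfs_balance_delayed_items() once the item count drops below the
 * background threshold or a whole batch has completed.
 */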
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction.  So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
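
/*
 * Reserve space in the delayed inode block reservation for one future inode
 * item update, either by migrating already reserved bytes from the
 * transaction rsv or, when the caller reserved nothing, with a fresh
 * NO_FLUSH reservation backed by a qgroup prealloc reservation.
 */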
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task to
	 * sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend the item
 * for new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert an item first, and then, if there are continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
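
/*
 * Delete from the leaf the batch of contiguous dir index items matching the
 * queued delayed deletion items, starting at the slot the path points to.
 */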
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item the node points to, so this node is
		 * invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
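
/*
 * Copy the in-memory inode item into the inode item in the btree and, when
 * requested, delete the single inode ref (or extref) left for an unlinked
 * inode.  Called with the delayed node's mutex held.
 */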
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref.  The case where several irefs are in the
	 * same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret = 0;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		/*
		 * See the comment below about releasing path before releasing
		 * node. If the commit of delayed items was successful the path
		 * should always be released, but in case of an error, it may
		 * point to locked extent buffers (a leaf at the very least).
		 */
		ASSERT(path->nodes[0] == NULL);
		btrfs_release_delayed_node(prev_node);
	}

	/*
	 * Release the path to avoid a potential deadlock and lockdep splat when
	 * releasing the delayed node, as that requires taking the delayed node's
	 * mutex. If another task starts running delayed items before we take
	 * the mutex, it will first lock the mutex and then it may try to lock
	 * the same btree path (leaf).
	 */
	btrfs_free_path(path);

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
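
/*
 * Detach and drop the delayed node cached in an inode that is going away.
 */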
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
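
/*
 * Background worker: flush prepared delayed nodes until the global item
 * count drops below half of BTRFS_DELAYED_BACKGROUND, or until async_work->nr
 * nodes have been processed (BTRFS_DELAYED_WRITEBACK when nr is 0).
 */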
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
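
/*
 * Throttle delayed item producers: kick background work once the number of
 * queued items passes BTRFS_DELAYED_BACKGROUND and, if it has reached
 * BTRFS_DELAYED_WRITEBACK, wait until a batch has been flushed.
 */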
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
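
/*
 * Queue the deletion of a dir index item.  If the matching insertion item is
 * still queued and was never written to the btree, the two cancel out and no
 * deletion item needs to be queued.
 */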
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
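
/*
 * Propagate the directory's next index number from the delayed node into the
 * in-memory inode.
 */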
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now.  So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do not
	 * insert/delete delayed items in this period.  So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible.  So we needn't
	 * lock it.  And we have held the i_mutex of the directory, so nobody
	 * can delete any directory index now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);

	return 0;
}
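
/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the btree right away; metadata is reserved the first time the
 * node's inode item goes dirty.
 */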
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);

	return 0;
}
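
/*
 * Throw away everything queued on a delayed node: all insertion and deletion
 * items, a pending iref deletion and a dirty inode item, releasing the
 * metadata reservations that back them.
 */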
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}