// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include <linux/sched/mm.h>
10 #include "delayed-inode.h"
12 #include "transaction.h"
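/*
 * Tuning knobs for the delayed item machinery (see
 * btrfs_balance_delayed_items() below): background flushing starts once a
 * root accumulates BTRFS_DELAYED_BACKGROUND items, callers start blocking
 * at BTRFS_DELAYED_WRITEBACK, and work completes in batches of
 * BTRFS_DELAYED_BATCH items.
 */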
16 #define BTRFS_DELAYED_WRITEBACK 512
17 #define BTRFS_DELAYED_BACKGROUND 128
18 #define BTRFS_DELAYED_BATCH 16
20 static struct kmem_cache *delayed_node_cache;
22 int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}
34 void __cold btrfs_delayed_inode_exit(void)
36 kmem_cache_destroy(delayed_node_cache);
39 static inline void btrfs_init_delayed_node(
40 struct btrfs_delayed_node *delayed_node,
41 struct btrfs_root *root, u64 inode_id)
43 delayed_node->root = root;
44 delayed_node->inode_id = inode_id;
45 refcount_set(&delayed_node->refs, 0);
46 delayed_node->ins_root = RB_ROOT;
47 delayed_node->del_root = RB_ROOT;
48 mutex_init(&delayed_node->mutex);
49 INIT_LIST_HEAD(&delayed_node->n_list);
50 INIT_LIST_HEAD(&delayed_node->p_list);
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
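/*
 * Example: two pending insertions keyed (ino 257, DIR_INDEX, offset 10)
 * and (ino 257, DIR_INDEX, offset 11) are "continuous" and may be batched
 * into a single leaf operation; the ino and offset values here are only
 * illustrative.
 */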
65 static struct btrfs_delayed_node *btrfs_get_delayed_node(
66 struct btrfs_inode *btrfs_inode)
68 struct btrfs_root *root = btrfs_inode->root;
69 u64 ino = btrfs_ino(btrfs_inode);
70 struct btrfs_delayed_node *node;
	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}
78 spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
83 refcount_inc(&node->refs); /* can be accessed */
84 BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
105 if (refcount_inc_not_zero(&node->refs)) {
106 refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
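/*
 * Reference counting convention: a delayed node holds one ref for being
 * cached in the btrfs_inode, one ref per active "get", and one ref while
 * it sits on the root's node_list; it is only freed when the last ref is
 * dropped in __btrfs_release_delayed_node().
 */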
120 /* Will return either the node or PTR_ERR(-ENOMEM) */
121 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
122 struct btrfs_inode *btrfs_inode)
124 struct btrfs_delayed_node *node;
125 struct btrfs_root *root = btrfs_inode->root;
126 u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
137 btrfs_init_delayed_node(node, root, ino);
139 /* cached in the btrfs inode and can be accessed */
140 refcount_set(&node->refs, 2);
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}
148 spin_lock(&root->inode_lock);
149 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
150 if (ret == -EEXIST) {
151 spin_unlock(&root->inode_lock);
152 kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
156 btrfs_inode->delayed_node = node;
157 spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
168 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
169 struct btrfs_delayed_node *node,
172 spin_lock(&root->lock);
173 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
179 list_add_tail(&node->n_list, &root->node_list);
180 list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
185 spin_unlock(&root->lock);
188 /* Call it when holding delayed_node->mutex */
189 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
190 struct btrfs_delayed_node *node)
192 spin_lock(&root->lock);
193 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
196 list_del_init(&node->n_list);
197 if (!list_empty(&node->p_list))
198 list_del_init(&node->p_list);
199 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
201 spin_unlock(&root->lock);
204 static struct btrfs_delayed_node *btrfs_first_delayed_node(
205 struct btrfs_delayed_root *delayed_root)
208 struct btrfs_delayed_node *node = NULL;
210 spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
223 static struct btrfs_delayed_node *btrfs_next_delayed_node(
224 struct btrfs_delayed_node *node)
226 struct btrfs_delayed_root *delayed_root;
228 struct btrfs_delayed_node *next = NULL;
230 delayed_root = node->root->fs_info->delayed_root;
231 spin_lock(&delayed_root->lock);
232 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
233 /* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
250 static void __btrfs_release_delayed_node(
251 struct btrfs_delayed_node *delayed_node,
254 struct btrfs_delayed_root *delayed_root;
259 delayed_root = delayed_node->root->fs_info->delayed_root;
261 mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
266 mutex_unlock(&delayed_node->mutex);
268 if (refcount_dec_and_test(&delayed_node->refs)) {
269 struct btrfs_root *root = delayed_node->root;
271 spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
276 ASSERT(refcount_read(&delayed_node->refs) == 0);
277 radix_tree_delete(&root->delayed_nodes_tree,
278 delayed_node->inode_id);
279 spin_unlock(&root->inode_lock);
280 kmem_cache_free(delayed_node_cache, delayed_node);
284 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
286 __btrfs_release_delayed_node(node, 0);
289 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
290 struct btrfs_delayed_root *delayed_root)
293 struct btrfs_delayed_node *node = NULL;
295 spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
309 static inline void btrfs_release_prepared_delayed_node(
310 struct btrfs_delayed_node *node)
312 __btrfs_release_delayed_node(node, 1);
315 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
317 struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
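/*
 * Note: the item's payload lives directly after the struct in the same
 * allocation (the kmalloc of sizeof(*item) + data_len above), so
 * item->data is valid for data_len bytes without a second allocation.
 */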
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
339 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
340 struct rb_root *root,
341 struct btrfs_key *key,
342 struct btrfs_delayed_item **prev,
343 struct btrfs_delayed_item **next)
345 struct rb_node *node, *prev_node = NULL;
346 struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}
	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
390 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
391 struct btrfs_delayed_node *delayed_node,
392 struct btrfs_key *key)
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
398 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
399 struct btrfs_delayed_item *ins,
402 struct rb_node **p, *node;
403 struct rb_node *parent_node = NULL;
404 struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;
408 if (action == BTRFS_DELAYED_INSERTION_ITEM)
409 root = &delayed_node->ins_root;
410 else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
431 rb_link_node(node, parent_node, p);
432 rb_insert_color(node, root);
433 ins->delayed_node = delayed_node;
434 ins->ins_or_del = action;
436 if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
437 action == BTRFS_DELAYED_INSERTION_ITEM &&
438 ins->key.offset >= delayed_node->index_cnt)
439 delayed_node->index_cnt = ins->key.offset + 1;
441 delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);

	return 0;
}
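/*
 * Note: in __btrfs_add_delayed_item() above, index_cnt tracks the next
 * free dir index offset for this inode, so new entries created while the
 * node is cached get increasing offsets without consulting the b-tree.
 */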
446 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
447 struct btrfs_delayed_item *item)
449 return __btrfs_add_delayed_item(node, item,
450 BTRFS_DELAYED_INSERTION_ITEM);
453 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
454 struct btrfs_delayed_item *item)
456 return __btrfs_add_delayed_item(node, item,
457 BTRFS_DELAYED_DELETION_ITEM);
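/*
 * Account one finished delayed item: bump the completion sequence and wake
 * waiters in btrfs_balance_delayed_items() once we drop below the
 * background threshold or complete a full BTRFS_DELAYED_BATCH.
 */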
460 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
462 int seq = atomic_inc_return(&delayed_root->items_seq);
464 /* atomic_dec_return implies a barrier */
465 if ((atomic_dec_return(&delayed_root->items) <
466 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
467 cond_wake_up_nomb(&delayed_root->wait);
470 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
472 struct rb_root *root;
473 struct btrfs_delayed_root *delayed_root;
475 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
477 BUG_ON(!delayed_root);
478 BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
479 delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;
486 rb_erase(&delayed_item->rb_node, root);
487 delayed_item->delayed_node->count--;
489 finish_one_item(delayed_root);
static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}
501 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
502 struct btrfs_delayed_node *delayed_node)
505 struct btrfs_delayed_item *item = NULL;
	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
514 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
515 struct btrfs_delayed_node *delayed_node)
518 struct btrfs_delayed_item *item = NULL;
	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
527 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
528 struct btrfs_delayed_item *item)
531 struct btrfs_delayed_item *next = NULL;
	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
540 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
541 struct btrfs_root *root,
542 struct btrfs_delayed_item *item)
544 struct btrfs_block_rsv *src_rsv;
545 struct btrfs_block_rsv *dst_rsv;
546 struct btrfs_fs_info *fs_info = root->fs_info;
	if (!trans->bytes_reserved)
		return 0;
553 src_rsv = trans->block_rsv;
554 dst_rsv = &fs_info->delayed_block_rsv;
556 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
574 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
575 struct btrfs_delayed_item *item)
577 struct btrfs_block_rsv *rsv;
578 struct btrfs_fs_info *fs_info = root->fs_info;
	if (!item->bytes_reserved)
		return;
583 rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
588 trace_btrfs_space_reservation(fs_info, "delayed_item",
589 item->key.objectid, item->bytes_reserved,
591 btrfs_block_rsv_release(fs_info, rsv,
592 item->bytes_reserved);
595 static int btrfs_delayed_inode_reserve_metadata(
596 struct btrfs_trans_handle *trans,
597 struct btrfs_root *root,
598 struct btrfs_inode *inode,
599 struct btrfs_delayed_node *node)
601 struct btrfs_fs_info *fs_info = root->fs_info;
602 struct btrfs_block_rsv *src_rsv;
603 struct btrfs_block_rsv *dst_rsv;
607 src_rsv = trans->block_rsv;
608 dst_rsv = &fs_info->delayed_block_rsv;
610 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space now.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
621 if (!src_rsv || (!trans->bytes_reserved &&
622 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
		if (ret < 0)
			return ret;
626 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
627 BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		return ret;
	}
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;
	if (!node->bytes_reserved)
		return;
669 rsv = &fs_info->delayed_block_rsv;
670 trace_btrfs_space_reservation(fs_info, "delayed_inode",
671 node->inode_id, node->bytes_reserved, 0);
672 btrfs_block_rsv_release(fs_info, rsv,
673 node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
687 static int btrfs_batch_insert_items(struct btrfs_root *root,
688 struct btrfs_path *path,
689 struct btrfs_delayed_item *item)
691 struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;
705 BUG_ON(!path->nodes[0]);
707 leaf = path->nodes[0];
708 free_space = btrfs_leaf_free_space(fs_info, leaf);
709 INIT_LIST_HEAD(&head);
	next = item;
	nitems = 0;

	/*
	 * Count the number of continuous items that we can insert in a batch.
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}
	/*
	 * We need to allocate some memory, but that might cause the task
	 * to sleep, so we set all the locked nodes in the path to blocking
	 * locks first.
	 */
743 btrfs_set_path_blocking(path);
	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}
757 /* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}
	/* reset all the locked nodes in the path to spinning locks. */
766 btrfs_clear_path_blocking(path, NULL, 0);
768 /* insert the keys of the items */
769 setup_items_for_insert(root, path, keys, data_size,
770 total_data_size, total_size, nitems);
772 /* insert the dir index items */
773 slot = path->slots[0];
774 list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
out:
	kfree(keys);
	return ret;
}
/*
 * This helper handles simple insertions that don't need to extend the item
 * for new data, such as directory name index insertion and inode insertion.
 */
798 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
799 struct btrfs_root *root,
800 struct btrfs_path *path,
801 struct btrfs_delayed_item *delayed_item)
803 struct extent_buffer *leaf;
804 unsigned int nofs_flag;
808 nofs_flag = memalloc_nofs_save();
809 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
810 delayed_item->data_len);
811 memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;
815 leaf = path->nodes[0];
817 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
819 write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
820 delayed_item->data_len);
821 btrfs_mark_buffer_dirty(leaf);
823 btrfs_delayed_item_release_metadata(root, delayed_item);
/*
 * We insert one item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
831 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
832 struct btrfs_path *path,
833 struct btrfs_root *root,
834 struct btrfs_delayed_node *node)
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}

	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
870 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
871 struct btrfs_root *root,
872 struct btrfs_path *path,
873 struct btrfs_delayed_item *item)
875 struct btrfs_delayed_item *curr, *next;
876 struct extent_buffer *leaf;
877 struct btrfs_key key;
878 struct list_head head;
	int nitems, i, last_item;
	int ret = 0;
882 BUG_ON(!path->nodes[0]);
	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */
	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * Count the number of dir index items that we can delete in a batch.
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}
	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;
923 list_for_each_entry_safe(curr, next, &head, tree_list) {
924 btrfs_delayed_item_release_metadata(root, curr);
925 list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
933 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
934 struct btrfs_path *path,
935 struct btrfs_root *root,
936 struct btrfs_delayed_node *node)
938 struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
943 mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;
948 nofs_flag = memalloc_nofs_save();
949 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * Can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
960 btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}
970 btrfs_batch_delete_items(trans, root, path, curr);
971 btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
976 btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
981 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
983 struct btrfs_delayed_root *delayed_root;
	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
987 BUG_ON(!delayed_node->root);
988 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
989 delayed_node->count--;
991 delayed_root = delayed_node->root->fs_info->delayed_root;
992 finish_one_item(delayed_root);
996 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
998 struct btrfs_delayed_root *delayed_root;
1000 ASSERT(delayed_node->root);
1001 clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1002 delayed_node->count--;
1004 delayed_root = delayed_node->root->fs_info->delayed_root;
1005 finish_one_item(delayed_root);
1008 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1009 struct btrfs_root *root,
1010 struct btrfs_path *path,
1011 struct btrfs_delayed_node *node)
1013 struct btrfs_fs_info *fs_info = root->fs_info;
1014 struct btrfs_key key;
1015 struct btrfs_inode_item *inode_item;
1016 struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;
1021 key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;
1030 nofs_flag = memalloc_nofs_save();
1031 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0)
		goto search;
	if (ret < 0)
		goto err_out;
1038 leaf = path->nodes[0];
1039 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1040 struct btrfs_inode_item);
1041 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1042 sizeof(struct btrfs_inode_item));
1043 btrfs_mark_buffer_dirty(leaf);
	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
1052 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;
1056 if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;
	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1072 btrfs_release_delayed_inode(node);
	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
1085 btrfs_release_path(path);
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
1090 nofs_flag = memalloc_nofs_save();
1091 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
1103 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1104 struct btrfs_root *root,
1105 struct btrfs_path *path,
1106 struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}
1122 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1123 struct btrfs_path *path,
1124 struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
1146 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1148 struct btrfs_fs_info *fs_info = trans->fs_info;
1149 struct btrfs_delayed_root *delayed_root;
1150 struct btrfs_delayed_node *curr_node, *prev_node;
1151 struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;
1164 block_rsv = trans->block_rsv;
1165 trans->block_rsv = &fs_info->delayed_block_rsv;
1167 delayed_root = fs_info->delayed_root;
1169 curr_node = btrfs_first_delayed_node(delayed_root);
1170 while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
1193 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1195 return __btrfs_run_delayed_items(trans, -1);
1198 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1200 return __btrfs_run_delayed_items(trans, nr);
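/*
 * Illustrative usage (the callers live elsewhere in btrfs): transaction
 * commit flushes everything with btrfs_run_delayed_items(trans), while
 * throttling paths bound the work with btrfs_run_delayed_items_nr().
 */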
1203 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1204 struct btrfs_inode *inode)
1206 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1207 struct btrfs_path *path;
1208 struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
1215 if (!delayed_node->count) {
1216 mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;
1229 block_rsv = trans->block_rsv;
1230 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1232 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1234 btrfs_release_delayed_node(delayed_node);
1235 btrfs_free_path(path);
1236 trans->block_rsv = block_rsv;
1241 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1243 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1244 struct btrfs_trans_handle *trans;
1245 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1246 struct btrfs_path *path;
1247 struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
1254 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1255 mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);
1261 trans = btrfs_join_transaction(delayed_node->root);
1262 if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;
1274 block_rsv = trans->block_rsv;
1275 trans->block_rsv = &fs_info->delayed_block_rsv;
1277 mutex_lock(&delayed_node->mutex);
1278 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1279 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);
1285 btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
1296 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1298 struct btrfs_delayed_node *delayed_node;
	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
1308 struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
1314 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1316 struct btrfs_async_delayed_work *async_work;
1317 struct btrfs_delayed_root *delayed_root;
1318 struct btrfs_trans_handle *trans;
1319 struct btrfs_path *path;
1320 struct btrfs_delayed_node *delayed_node = NULL;
1321 struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;
1325 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1326 delayed_root = async_work->delayed_root;
	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;
1341 path->leave_spinning = 1;
1342 root = delayed_node->root;
1344 trans = btrfs_join_transaction(root);
1345 if (IS_ERR(trans)) {
1346 btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}
1352 block_rsv = trans->block_rsv;
1353 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1355 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1357 trans->block_rsv = block_rsv;
1358 btrfs_end_transaction(trans);
1359 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1361 btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;
1365 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1366 || total_done < async_work->nr);
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
1375 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1376 struct btrfs_fs_info *fs_info, int nr)
1378 struct btrfs_async_delayed_work *async_work;
	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;
1384 async_work->delayed_root = delayed_root;
1385 btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1386 btrfs_async_run_delayed_root, NULL, NULL);
1387 async_work->nr = nr;
	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
1393 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1395 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1398 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1400 int val = atomic_read(&delayed_root->items_seq);
	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
1411 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1413 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;
	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}
1434 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
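/*
 * Summary of the throttling policy above: below BTRFS_DELAYED_BACKGROUND
 * items nothing is done; at or above BTRFS_DELAYED_WRITEBACK the caller
 * kicks a full async flush (nr == 0) and waits until could_end_wait()
 * reports that a batch has completed; in between, an async worker is
 * asked to flush BTRFS_DELAYED_BATCH items.
 */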
1437 /* Will return 0 or -ENOMEM */
1438 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1439 const char *name, int name_len,
1440 struct btrfs_inode *dir,
1441 struct btrfs_disk_key *disk_key, u8 type,
1444 struct btrfs_delayed_node *delayed_node;
1445 struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;
1449 delayed_node = btrfs_get_or_create_delayed_node(dir);
1450 if (IS_ERR(delayed_node))
1451 return PTR_ERR(delayed_node);
1453 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}
1459 delayed_item->key.objectid = btrfs_ino(dir);
1460 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1461 delayed_item->key.offset = index;
1463 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1464 dir_item->location = *disk_key;
1465 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1466 btrfs_set_stack_dir_data_len(dir_item, 0);
1467 btrfs_set_stack_dir_name_len(dir_item, name_len);
1468 btrfs_set_stack_dir_type(dir_item, type);
1469 memcpy((char *)(dir_item + 1), name, name_len);
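	/*
	 * The name is stored inline right after the struct btrfs_dir_item,
	 * mirroring the on-disk dir item layout, so a single delayed item
	 * carries both the fixed header and the variable-length name.
	 */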
1471 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so a failure to reserve metadata here is impossible.
	 */
	BUG_ON(ret);
1478 mutex_lock(&delayed_node->mutex);
1479 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1480 if (unlikely(ret)) {
1481 btrfs_err(trans->fs_info,
1482 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1483 name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
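/*
 * If the index being deleted still has a pending delayed insertion, the
 * two operations cancel out: the helper below just drops the pending
 * insertion item and neither change ever reaches the b-tree.
 */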
1494 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1495 struct btrfs_delayed_node *node,
1496 struct btrfs_key *key)
1498 struct btrfs_delayed_item *item;
1500 mutex_lock(&node->mutex);
1501 item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}
1507 btrfs_delayed_item_release_metadata(node->root, item);
1508 btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
1513 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1514 struct btrfs_inode *dir, u64 index)
1516 struct btrfs_delayed_node *node;
1517 struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;
	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);
1525 item_key.objectid = btrfs_ino(dir);
1526 item_key.type = BTRFS_DIR_INDEX_KEY;
1527 item_key.offset = index;
	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;
	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}
1540 item->key = item_key;
1542 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so a failure to reserve metadata here is impossible.
	 */
	BUG_ON(ret);
1549 mutex_lock(&node->mutex);
1550 ret = __btrfs_add_delayed_deletion_item(node, item);
1551 if (unlikely(ret)) {
1552 btrfs_err(trans->fs_info,
1553 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
1563 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;
1571 * Since we have held i_mutex of this directory, it is impossible that
1572 * a new directory index is added into the delayed node and index_cnt
1573 * is updated now. So we needn't lock the delayed node.
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
1585 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1586 struct list_head *ins_list,
1587 struct list_head *del_list)
1589 struct btrfs_delayed_node *delayed_node;
1590 struct btrfs_delayed_item *item;
	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;
	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);
1603 mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}
	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
1617 mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the directory, and we
	 * do not insert/delete delayed items during this period, so we also
	 * needn't requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
1632 void btrfs_readdir_put_delayed_items(struct inode *inode,
1633 struct list_head *ins_list,
1634 struct list_head *del_list)
1636 struct btrfs_delayed_item *curr, *next;
1638 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1639 list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}
1644 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1645 list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}
	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
1657 int btrfs_should_delete_dir_index(struct list_head *del_list,
	struct btrfs_delayed_item *curr;
	int ret = 0;
1663 list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
1678 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1679 struct list_head *ins_list)
1681 struct btrfs_dir_item *di;
1682 struct btrfs_delayed_item *curr, *next;
1683 struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;
	if (list_empty(ins_list))
		return 0;
	/*
	 * Changing the data of the delayed item is impossible, so we don't
	 * need to lock them. And since we hold the i_mutex of the directory,
	 * nobody can delete any directory indexes now.
	 */
1697 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1698 list_del(&curr->readdir_list);
1700 if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;
1708 di = (struct btrfs_dir_item *)curr->data;
1709 name = (char *)(di + 1);
1710 name_len = btrfs_stack_dir_name_len(di);
1712 d_type = btrfs_filetype_table[di->type];
1713 btrfs_disk_key_to_cpu(&location, &di->location);
1715 over = !dir_emit(ctx, name, name_len,
1716 location.objectid, d_type);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
1728 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1729 struct btrfs_inode_item *inode_item,
1730 struct inode *inode)
1732 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1733 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1734 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1735 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1736 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1737 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1738 btrfs_set_stack_inode_generation(inode_item,
1739 BTRFS_I(inode)->generation);
1740 btrfs_set_stack_inode_sequence(inode_item,
1741 inode_peek_iversion(inode));
1742 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1743 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1744 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1745 btrfs_set_stack_inode_block_group(inode_item, 0);
1747 btrfs_set_stack_timespec_sec(&inode_item->atime,
1748 inode->i_atime.tv_sec);
1749 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1750 inode->i_atime.tv_nsec);
1752 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1753 inode->i_mtime.tv_sec);
1754 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1755 inode->i_mtime.tv_nsec);
1757 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1758 inode->i_ctime.tv_sec);
1759 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1760 inode->i_ctime.tv_nsec);
1762 btrfs_set_stack_timespec_sec(&inode_item->otime,
1763 BTRFS_I(inode)->i_otime.tv_sec);
1764 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1765 BTRFS_I(inode)->i_otime.tv_nsec);
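/*
 * The inverse of fill_stack_inode_item(): btrfs_fill_inode() below copies
 * a cached delayed inode item back into the VFS inode, letting a lookup
 * see the pending update without reading the inode item from the b-tree.
 */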
1768 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1770 struct btrfs_delayed_node *delayed_node;
1771 struct btrfs_inode_item *inode_item;
	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;
1777 mutex_lock(&delayed_node->mutex);
1778 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1779 mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}
1784 inode_item = &delayed_node->inode_item;
1786 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1787 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1788 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1789 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1790 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1791 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1792 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1793 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1795 inode_set_iversion_queried(inode,
1796 btrfs_stack_inode_sequence(inode_item));
1798 *rdev = btrfs_stack_inode_rdev(inode_item);
1799 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1801 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1802 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1804 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1805 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1807 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1808 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1810 BTRFS_I(inode)->i_otime.tv_sec =
1811 btrfs_stack_timespec_sec(&inode_item->otime);
1812 BTRFS_I(inode)->i_otime.tv_nsec =
1813 btrfs_stack_timespec_nsec(&inode_item->otime);
1815 inode->i_generation = BTRFS_I(inode)->generation;
1816 BTRFS_I(inode)->index_cnt = (u64)-1;
1818 mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);

	return 0;
}
1823 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1824 struct btrfs_root *root, struct inode *inode)
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;
1829 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1830 if (IS_ERR(delayed_node))
1831 return PTR_ERR(delayed_node);
1833 mutex_lock(&delayed_node->mutex);
1834 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}
	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;
1844 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1845 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1846 delayed_node->count++;
1847 atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
1854 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1856 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1857 struct btrfs_delayed_node *delayed_node;
	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;
1867 delayed_node = btrfs_get_or_create_delayed_node(inode);
1868 if (IS_ERR(delayed_node))
1869 return PTR_ERR(delayed_node);
	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for inodes that have only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, so we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we
	 *   reserve much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is OK.
	 */
1885 mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;
1889 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1890 delayed_node->count++;
1891 atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
1898 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1900 struct btrfs_root *root = delayed_node->root;
1901 struct btrfs_fs_info *fs_info = root->fs_info;
1902 struct btrfs_delayed_item *curr_item, *prev_item;
1904 mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}
	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}
1921 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1922 btrfs_release_delayed_iref(delayed_node);
1924 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1925 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1926 btrfs_release_delayed_inode(delayed_node);
1928 mutex_unlock(&delayed_node->mutex);
1931 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1933 struct btrfs_delayed_node *delayed_node;
	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;
1939 __btrfs_kill_delayed_node(delayed_node);
1940 btrfs_release_delayed_node(delayed_node);
1943 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
1950 spin_lock(&root->inode_lock);
1951 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1952 (void **)delayed_nodes, inode_id,
1953 ARRAY_SIZE(delayed_nodes));
		if (n == 0) {
			spin_unlock(&root->inode_lock);
			break;
		}
1959 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1960 for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below.
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
1968 spin_unlock(&root->inode_lock);
1970 for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
1973 __btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
1979 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1981 struct btrfs_delayed_node *curr_node, *prev_node;
	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}