/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
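
/*
 * How the constants above are used (summary of the code below): once the
 * number of pending delayed items passes BTRFS_DELAYED_BACKGROUND,
 * btrfs_balance_delayed_items() kicks an async flush of BTRFS_DELAYED_BATCH
 * nodes; once it passes BTRFS_DELAYED_WRITEBACK the caller itself blocks
 * until the backlog shrinks. BTRFS_DELAYED_BATCH is also the wakeup
 * granularity used by finish_one_item().
 */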

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
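
/*
 * For example, dir index items with keys (257 DIR_INDEX 100) and
 * (257 DIR_INDEX 101) are continuous and may be batched into one leaf
 * operation, while (257 DIR_INDEX 100) and (257 DIR_INDEX 102) are not.
 */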

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
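
/*
 * Note the retry above: if another task inserted a delayed node for the same
 * inode between our lookup and radix_tree_insert(), the insertion fails with
 * -EEXIST, we free our candidate node and go around again, picking up the
 * winner's node via btrfs_get_delayed_node().
 */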

/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
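
/*
 * A delayed node can sit on two lists: n_list (delayed_root->node_list)
 * holds every node with pending work and is walked at transaction commit,
 * while p_list (delayed_root->prepare_list) feeds the async background
 * worker. One node reference covers membership on both lists.
 */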

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
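
/*
 * The item's payload (e.g. a stack btrfs_dir_item followed by the name) is
 * stored in the flexible array at the tail of struct btrfs_delayed_item,
 * which is why a single kmalloc of sizeof(*item) + data_len covers both the
 * bookkeeping and the data.
 */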

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return() implies a barrier for waitqueue_active().
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
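
/*
 * items_seq only ever increases, one step per finished item. Throttled
 * callers sample it before sleeping and could_end_wait() compares the
 * current value against that sample, so progress of BTRFS_DELAYED_BATCH
 * items is guaranteed to wake anyone waiting in
 * btrfs_balance_delayed_items().
 */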

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
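
/*
 * Each queued item parks one unit of metadata reservation (sized by
 * btrfs_calc_trans_metadata_size(fs_info, 1)) in the global
 * delayed_block_rsv until the item is flushed; the space is moved over from
 * the reservation the transaction handle already holds rather than newly
 * reserved.
 */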

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode. If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind. We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&inode->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &inode->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&inode->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv. This is to simplify people who don't normally have things
	 * migrated from their block rsv. If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size. But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up. So to take care of this, release the space for the meta
	 * reservation here. I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
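
/*
 * Rough sizing example for the free space check above: with 16K nodes a
 * mostly empty leaf has on the order of 16000 bytes free, and a dir index
 * item for a 10 character name costs sizeof(struct btrfs_dir_item) + 10
 * bytes of data plus sizeof(struct btrfs_item) for the item header, roughly
 * 65 bytes in total, so a couple hundred continuous dir index items can land
 * in a single leaf.
 */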

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto err_out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
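
/*
 * The mod value chosen above maps onto btrfs_search_slot()'s ins_len inside
 * btrfs_lookup_inode(): -1 prepares the leaf for the inode ref deletion that
 * follows when DEL_IREF is set, while 1 is a plain cowing lookup. The inode
 * item itself is overwritten in place in both cases.
 */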

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		/*
		 * See the comment below about releasing path before releasing
		 * node. If the commit of delayed items was successful the path
		 * should always be released, but in case of an error, it may
		 * point to locked extent buffers (a leaf at the very least).
		 */
		ASSERT(path->nodes[0] == NULL);
		btrfs_release_delayed_node(prev_node);
	}

	/*
	 * Release the path to avoid a potential deadlock and lockdep splat when
	 * releasing the delayed node, as that requires taking the delayed node's
	 * mutex. If another task starts running delayed items before we take
	 * the mutex, it will first lock the mutex and then it may try to lock
	 * the same btree path (leaf).
	 */
	btrfs_free_path(path);

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

do_again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto do_again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
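
/*
 * The function above is the fast path for creating a directory entry: the
 * new dir index item is only recorded in memory (typically reached via
 * btrfs_add_link()), and the actual btree insertion is deferred to
 * btrfs_insert_delayed_items() at commit or background flush time.
 */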

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
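
/*
 * If the dir index being removed still exists only as a pending in-memory
 * insertion, dropping that insertion item above cancels both operations
 * without ever touching the btree, and the caller skips queueing a deletion
 * item.
 */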

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
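
/*
 * The lookup above scans the radix tree in gangs of 8 nodes, resuming after
 * the highest inode number seen so far, and only takes references via
 * refcount_inc_not_zero(), so nodes that are already being torn down are
 * skipped instead of resurrected.
 */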

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}