GNU Linux-libre 6.8.9-gnu
[releases.git] / fs / btrfs / delayed-inode.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 Fujitsu.  All rights reserved.
4  * Written by Miao Xie <miaox@cn.fujitsu.com>
5  */
6
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include "ctree.h"
10 #include "fs.h"
11 #include "messages.h"
12 #include "misc.h"
13 #include "delayed-inode.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "qgroup.h"
17 #include "locking.h"
18 #include "inode-item.h"
19 #include "space-info.h"
20 #include "accessors.h"
21 #include "file-item.h"
22
23 #define BTRFS_DELAYED_WRITEBACK         512
24 #define BTRFS_DELAYED_BACKGROUND        128
25 #define BTRFS_DELAYED_BATCH             16
26
27 static struct kmem_cache *delayed_node_cache;
28
29 int __init btrfs_delayed_inode_init(void)
30 {
31         delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
32                                         sizeof(struct btrfs_delayed_node),
33                                         0,
34                                         SLAB_MEM_SPREAD,
35                                         NULL);
36         if (!delayed_node_cache)
37                 return -ENOMEM;
38         return 0;
39 }
40
41 void __cold btrfs_delayed_inode_exit(void)
42 {
43         kmem_cache_destroy(delayed_node_cache);
44 }
45
46 static inline void btrfs_init_delayed_node(
47                                 struct btrfs_delayed_node *delayed_node,
48                                 struct btrfs_root *root, u64 inode_id)
49 {
50         delayed_node->root = root;
51         delayed_node->inode_id = inode_id;
52         refcount_set(&delayed_node->refs, 0);
53         delayed_node->ins_root = RB_ROOT_CACHED;
54         delayed_node->del_root = RB_ROOT_CACHED;
55         mutex_init(&delayed_node->mutex);
56         INIT_LIST_HEAD(&delayed_node->n_list);
57         INIT_LIST_HEAD(&delayed_node->p_list);
58 }
59
60 static struct btrfs_delayed_node *btrfs_get_delayed_node(
61                 struct btrfs_inode *btrfs_inode)
62 {
63         struct btrfs_root *root = btrfs_inode->root;
64         u64 ino = btrfs_ino(btrfs_inode);
65         struct btrfs_delayed_node *node;
66
67         node = READ_ONCE(btrfs_inode->delayed_node);
68         if (node) {
69                 refcount_inc(&node->refs);
70                 return node;
71         }
72
73         spin_lock(&root->inode_lock);
74         node = xa_load(&root->delayed_nodes, ino);
75
76         if (node) {
77                 if (btrfs_inode->delayed_node) {
78                         refcount_inc(&node->refs);      /* can be accessed */
79                         BUG_ON(btrfs_inode->delayed_node != node);
80                         spin_unlock(&root->inode_lock);
81                         return node;
82                 }
83
84                 /*
85                  * It's possible that we're racing into the middle of removing
86                  * this node from the xarray.  In this case, the refcount
87                  * was zero and it should never go back to one.  Just return
88                  * NULL like it was never in the xarray at all; our release
89                  * function is in the process of removing it.
90                  *
91                  * Some implementations of refcount_inc refuse to bump the
92                  * refcount once it has hit zero.  If we don't do this dance
93                  * here, refcount_inc() may decide to just WARN_ONCE() instead
94                  * of actually bumping the refcount.
95                  *
96                  * If this node is properly in the xarray, we want to bump the
97                  * refcount twice, once for the inode and once for this get
98                  * operation.
99                  */
100                 if (refcount_inc_not_zero(&node->refs)) {
101                         refcount_inc(&node->refs);
102                         btrfs_inode->delayed_node = node;
103                 } else {
104                         node = NULL;
105                 }
106
107                 spin_unlock(&root->inode_lock);
108                 return node;
109         }
110         spin_unlock(&root->inode_lock);
111
112         return NULL;
113 }
114
115 /* Will return either the node or PTR_ERR(-ENOMEM) */
116 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
117                 struct btrfs_inode *btrfs_inode)
118 {
119         struct btrfs_delayed_node *node;
120         struct btrfs_root *root = btrfs_inode->root;
121         u64 ino = btrfs_ino(btrfs_inode);
122         int ret;
123         void *ptr;
124
125 again:
126         node = btrfs_get_delayed_node(btrfs_inode);
127         if (node)
128                 return node;
129
130         node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
131         if (!node)
132                 return ERR_PTR(-ENOMEM);
133         btrfs_init_delayed_node(node, root, ino);
134
135         /* Cached in the inode and can be accessed. */
136         refcount_set(&node->refs, 2);
137
138         /* Allocate and reserve the slot, from now it can return a NULL from xa_load(). */
139         ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
140         if (ret == -ENOMEM) {
141                 kmem_cache_free(delayed_node_cache, node);
142                 return ERR_PTR(-ENOMEM);
143         }
144         spin_lock(&root->inode_lock);
145         ptr = xa_load(&root->delayed_nodes, ino);
146         if (ptr) {
147                 /* Somebody inserted it, go back and read it. */
148                 spin_unlock(&root->inode_lock);
149                 kmem_cache_free(delayed_node_cache, node);
150                 node = NULL;
151                 goto again;
152         }
153         ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
154         ASSERT(xa_err(ptr) != -EINVAL);
155         ASSERT(xa_err(ptr) != -ENOMEM);
156         ASSERT(ptr == NULL);
157         btrfs_inode->delayed_node = node;
158         spin_unlock(&root->inode_lock);
159
160         return node;
161 }
162
163 /*
164  * Call it when holding delayed_node->mutex
165  *
166  * If mod = 1, add this node into the prepared list.
167  */
168 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
169                                      struct btrfs_delayed_node *node,
170                                      int mod)
171 {
172         spin_lock(&root->lock);
173         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
174                 if (!list_empty(&node->p_list))
175                         list_move_tail(&node->p_list, &root->prepare_list);
176                 else if (mod)
177                         list_add_tail(&node->p_list, &root->prepare_list);
178         } else {
179                 list_add_tail(&node->n_list, &root->node_list);
180                 list_add_tail(&node->p_list, &root->prepare_list);
181                 refcount_inc(&node->refs);      /* inserted into list */
182                 root->nodes++;
183                 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
184         }
185         spin_unlock(&root->lock);
186 }
187
188 /* Call it when holding delayed_node->mutex */
189 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
190                                        struct btrfs_delayed_node *node)
191 {
192         spin_lock(&root->lock);
193         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
194                 root->nodes--;
195                 refcount_dec(&node->refs);      /* not in the list */
196                 list_del_init(&node->n_list);
197                 if (!list_empty(&node->p_list))
198                         list_del_init(&node->p_list);
199                 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
200         }
201         spin_unlock(&root->lock);
202 }
203
204 static struct btrfs_delayed_node *btrfs_first_delayed_node(
205                         struct btrfs_delayed_root *delayed_root)
206 {
207         struct list_head *p;
208         struct btrfs_delayed_node *node = NULL;
209
210         spin_lock(&delayed_root->lock);
211         if (list_empty(&delayed_root->node_list))
212                 goto out;
213
214         p = delayed_root->node_list.next;
215         node = list_entry(p, struct btrfs_delayed_node, n_list);
216         refcount_inc(&node->refs);
217 out:
218         spin_unlock(&delayed_root->lock);
219
220         return node;
221 }
222
223 static struct btrfs_delayed_node *btrfs_next_delayed_node(
224                                                 struct btrfs_delayed_node *node)
225 {
226         struct btrfs_delayed_root *delayed_root;
227         struct list_head *p;
228         struct btrfs_delayed_node *next = NULL;
229
230         delayed_root = node->root->fs_info->delayed_root;
231         spin_lock(&delayed_root->lock);
232         if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
233                 /* not in the list */
234                 if (list_empty(&delayed_root->node_list))
235                         goto out;
236                 p = delayed_root->node_list.next;
237         } else if (list_is_last(&node->n_list, &delayed_root->node_list))
238                 goto out;
239         else
240                 p = node->n_list.next;
241
242         next = list_entry(p, struct btrfs_delayed_node, n_list);
243         refcount_inc(&next->refs);
244 out:
245         spin_unlock(&delayed_root->lock);
246
247         return next;
248 }
249
250 static void __btrfs_release_delayed_node(
251                                 struct btrfs_delayed_node *delayed_node,
252                                 int mod)
253 {
254         struct btrfs_delayed_root *delayed_root;
255
256         if (!delayed_node)
257                 return;
258
259         delayed_root = delayed_node->root->fs_info->delayed_root;
260
261         mutex_lock(&delayed_node->mutex);
262         if (delayed_node->count)
263                 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
264         else
265                 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
266         mutex_unlock(&delayed_node->mutex);
267
268         if (refcount_dec_and_test(&delayed_node->refs)) {
269                 struct btrfs_root *root = delayed_node->root;
270
271                 spin_lock(&root->inode_lock);
272                 /*
273                  * Once our refcount goes to zero, nobody is allowed to bump it
274                  * back up.  We can delete it now.
275                  */
276                 ASSERT(refcount_read(&delayed_node->refs) == 0);
277                 xa_erase(&root->delayed_nodes, delayed_node->inode_id);
278                 spin_unlock(&root->inode_lock);
279                 kmem_cache_free(delayed_node_cache, delayed_node);
280         }
281 }
282
283 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
284 {
285         __btrfs_release_delayed_node(node, 0);
286 }
287
288 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
289                                         struct btrfs_delayed_root *delayed_root)
290 {
291         struct list_head *p;
292         struct btrfs_delayed_node *node = NULL;
293
294         spin_lock(&delayed_root->lock);
295         if (list_empty(&delayed_root->prepare_list))
296                 goto out;
297
298         p = delayed_root->prepare_list.next;
299         list_del_init(p);
300         node = list_entry(p, struct btrfs_delayed_node, p_list);
301         refcount_inc(&node->refs);
302 out:
303         spin_unlock(&delayed_root->lock);
304
305         return node;
306 }
307
308 static inline void btrfs_release_prepared_delayed_node(
309                                         struct btrfs_delayed_node *node)
310 {
311         __btrfs_release_delayed_node(node, 1);
312 }
313
314 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
315                                            struct btrfs_delayed_node *node,
316                                            enum btrfs_delayed_item_type type)
317 {
318         struct btrfs_delayed_item *item;
319
320         item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
321         if (item) {
322                 item->data_len = data_len;
323                 item->type = type;
324                 item->bytes_reserved = 0;
325                 item->delayed_node = node;
326                 RB_CLEAR_NODE(&item->rb_node);
327                 INIT_LIST_HEAD(&item->log_list);
328                 item->logged = false;
329                 refcount_set(&item->refs, 1);
330         }
331         return item;
332 }
333
334 /*
335  * Look up the delayed item by key.
336  *
337  * @delayed_node: pointer to the delayed node
338  * @index:        the dir index value to lookup (offset of a dir index key)
339  *
340  * Note: if we don't find the right item, we will return the prev item and
341  * the next item.
342  */
343 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
344                                 struct rb_root *root,
345                                 u64 index)
346 {
347         struct rb_node *node = root->rb_node;
348         struct btrfs_delayed_item *delayed_item = NULL;
349
350         while (node) {
351                 delayed_item = rb_entry(node, struct btrfs_delayed_item,
352                                         rb_node);
353                 if (delayed_item->index < index)
354                         node = node->rb_right;
355                 else if (delayed_item->index > index)
356                         node = node->rb_left;
357                 else
358                         return delayed_item;
359         }
360
361         return NULL;
362 }
363
364 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
365                                     struct btrfs_delayed_item *ins)
366 {
367         struct rb_node **p, *node;
368         struct rb_node *parent_node = NULL;
369         struct rb_root_cached *root;
370         struct btrfs_delayed_item *item;
371         bool leftmost = true;
372
373         if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
374                 root = &delayed_node->ins_root;
375         else
376                 root = &delayed_node->del_root;
377
378         p = &root->rb_root.rb_node;
379         node = &ins->rb_node;
380
381         while (*p) {
382                 parent_node = *p;
383                 item = rb_entry(parent_node, struct btrfs_delayed_item,
384                                  rb_node);
385
386                 if (item->index < ins->index) {
387                         p = &(*p)->rb_right;
388                         leftmost = false;
389                 } else if (item->index > ins->index) {
390                         p = &(*p)->rb_left;
391                 } else {
392                         return -EEXIST;
393                 }
394         }
395
396         rb_link_node(node, parent_node, p);
397         rb_insert_color_cached(node, root, leftmost);
398
399         if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
400             ins->index >= delayed_node->index_cnt)
401                 delayed_node->index_cnt = ins->index + 1;
402
403         delayed_node->count++;
404         atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
405         return 0;
406 }
407
408 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
409 {
410         int seq = atomic_inc_return(&delayed_root->items_seq);
411
412         /* atomic_dec_return implies a barrier */
413         if ((atomic_dec_return(&delayed_root->items) <
414             BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
415                 cond_wake_up_nomb(&delayed_root->wait);
416 }
417
418 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
419 {
420         struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
421         struct rb_root_cached *root;
422         struct btrfs_delayed_root *delayed_root;
423
424         /* Not inserted, ignore it. */
425         if (RB_EMPTY_NODE(&delayed_item->rb_node))
426                 return;
427
428         /* If it's in a rbtree, then we need to have delayed node locked. */
429         lockdep_assert_held(&delayed_node->mutex);
430
431         delayed_root = delayed_node->root->fs_info->delayed_root;
432
433         BUG_ON(!delayed_root);
434
435         if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
436                 root = &delayed_node->ins_root;
437         else
438                 root = &delayed_node->del_root;
439
440         rb_erase_cached(&delayed_item->rb_node, root);
441         RB_CLEAR_NODE(&delayed_item->rb_node);
442         delayed_node->count--;
443
444         finish_one_item(delayed_root);
445 }
446
447 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
448 {
449         if (item) {
450                 __btrfs_remove_delayed_item(item);
451                 if (refcount_dec_and_test(&item->refs))
452                         kfree(item);
453         }
454 }
455
456 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
457                                         struct btrfs_delayed_node *delayed_node)
458 {
459         struct rb_node *p;
460         struct btrfs_delayed_item *item = NULL;
461
462         p = rb_first_cached(&delayed_node->ins_root);
463         if (p)
464                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
465
466         return item;
467 }
468
469 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
470                                         struct btrfs_delayed_node *delayed_node)
471 {
472         struct rb_node *p;
473         struct btrfs_delayed_item *item = NULL;
474
475         p = rb_first_cached(&delayed_node->del_root);
476         if (p)
477                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
478
479         return item;
480 }
481
482 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
483                                                 struct btrfs_delayed_item *item)
484 {
485         struct rb_node *p;
486         struct btrfs_delayed_item *next = NULL;
487
488         p = rb_next(&item->rb_node);
489         if (p)
490                 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
491
492         return next;
493 }
494
495 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
496                                                struct btrfs_delayed_item *item)
497 {
498         struct btrfs_block_rsv *src_rsv;
499         struct btrfs_block_rsv *dst_rsv;
500         struct btrfs_fs_info *fs_info = trans->fs_info;
501         u64 num_bytes;
502         int ret;
503
504         if (!trans->bytes_reserved)
505                 return 0;
506
507         src_rsv = trans->block_rsv;
508         dst_rsv = &fs_info->delayed_block_rsv;
509
510         num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
511
512         /*
513          * Here we migrate space rsv from transaction rsv, since have already
514          * reserved space when starting a transaction.  So no need to reserve
515          * qgroup space here.
516          */
517         ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
518         if (!ret) {
519                 trace_btrfs_space_reservation(fs_info, "delayed_item",
520                                               item->delayed_node->inode_id,
521                                               num_bytes, 1);
522                 /*
523                  * For insertions we track reserved metadata space by accounting
524                  * for the number of leaves that will be used, based on the delayed
525                  * node's curr_index_batch_size and index_item_leaves fields.
526                  */
527                 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
528                         item->bytes_reserved = num_bytes;
529         }
530
531         return ret;
532 }
533
534 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
535                                                 struct btrfs_delayed_item *item)
536 {
537         struct btrfs_block_rsv *rsv;
538         struct btrfs_fs_info *fs_info = root->fs_info;
539
540         if (!item->bytes_reserved)
541                 return;
542
543         rsv = &fs_info->delayed_block_rsv;
544         /*
545          * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
546          * to release/reserve qgroup space.
547          */
548         trace_btrfs_space_reservation(fs_info, "delayed_item",
549                                       item->delayed_node->inode_id,
550                                       item->bytes_reserved, 0);
551         btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
552 }
553
554 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
555                                               unsigned int num_leaves)
556 {
557         struct btrfs_fs_info *fs_info = node->root->fs_info;
558         const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
559
560         /* There are no space reservations during log replay, bail out. */
561         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
562                 return;
563
564         trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
565                                       bytes, 0);
566         btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
567 }
568
569 static int btrfs_delayed_inode_reserve_metadata(
570                                         struct btrfs_trans_handle *trans,
571                                         struct btrfs_root *root,
572                                         struct btrfs_delayed_node *node)
573 {
574         struct btrfs_fs_info *fs_info = root->fs_info;
575         struct btrfs_block_rsv *src_rsv;
576         struct btrfs_block_rsv *dst_rsv;
577         u64 num_bytes;
578         int ret;
579
580         src_rsv = trans->block_rsv;
581         dst_rsv = &fs_info->delayed_block_rsv;
582
583         num_bytes = btrfs_calc_metadata_size(fs_info, 1);
584
585         /*
586          * btrfs_dirty_inode will update the inode under btrfs_join_transaction
587          * which doesn't reserve space for speed.  This is a problem since we
588          * still need to reserve space for this update, so try to reserve the
589          * space.
590          *
591          * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
592          * we always reserve enough to update the inode item.
593          */
594         if (!src_rsv || (!trans->bytes_reserved &&
595                          src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
596                 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
597                                           BTRFS_QGROUP_RSV_META_PREALLOC, true);
598                 if (ret < 0)
599                         return ret;
600                 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
601                                           BTRFS_RESERVE_NO_FLUSH);
602                 /* NO_FLUSH could only fail with -ENOSPC */
603                 ASSERT(ret == 0 || ret == -ENOSPC);
604                 if (ret)
605                         btrfs_qgroup_free_meta_prealloc(root, num_bytes);
606         } else {
607                 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
608         }
609
610         if (!ret) {
611                 trace_btrfs_space_reservation(fs_info, "delayed_inode",
612                                               node->inode_id, num_bytes, 1);
613                 node->bytes_reserved = num_bytes;
614         }
615
616         return ret;
617 }
618
619 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
620                                                 struct btrfs_delayed_node *node,
621                                                 bool qgroup_free)
622 {
623         struct btrfs_block_rsv *rsv;
624
625         if (!node->bytes_reserved)
626                 return;
627
628         rsv = &fs_info->delayed_block_rsv;
629         trace_btrfs_space_reservation(fs_info, "delayed_inode",
630                                       node->inode_id, node->bytes_reserved, 0);
631         btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
632         if (qgroup_free)
633                 btrfs_qgroup_free_meta_prealloc(node->root,
634                                 node->bytes_reserved);
635         else
636                 btrfs_qgroup_convert_reserved_meta(node->root,
637                                 node->bytes_reserved);
638         node->bytes_reserved = 0;
639 }
640
641 /*
642  * Insert a single delayed item or a batch of delayed items, as many as possible
643  * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
644  * in the rbtree, and if there's a gap between two consecutive dir index items,
645  * then it means at some point we had delayed dir indexes to add but they got
646  * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
647  * into the subvolume tree. Dir index keys also have their offsets coming from a
648  * monotonically increasing counter, so we can't get new keys with an offset that
649  * fits within a gap between delayed dir index items.
650  */
651 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
652                                      struct btrfs_root *root,
653                                      struct btrfs_path *path,
654                                      struct btrfs_delayed_item *first_item)
655 {
656         struct btrfs_fs_info *fs_info = root->fs_info;
657         struct btrfs_delayed_node *node = first_item->delayed_node;
658         LIST_HEAD(item_list);
659         struct btrfs_delayed_item *curr;
660         struct btrfs_delayed_item *next;
661         const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
662         struct btrfs_item_batch batch;
663         struct btrfs_key first_key;
664         const u32 first_data_size = first_item->data_len;
665         int total_size;
666         char *ins_data = NULL;
667         int ret;
668         bool continuous_keys_only = false;
669
670         lockdep_assert_held(&node->mutex);
671
672         /*
673          * During normal operation the delayed index offset is continuously
674          * increasing, so we can batch insert all items as there will not be any
675          * overlapping keys in the tree.
676          *
677          * The exception to this is log replay, where we may have interleaved
678          * offsets in the tree, so our batch needs to be continuous keys only in
679          * order to ensure we do not end up with out of order items in our leaf.
680          */
681         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
682                 continuous_keys_only = true;
683
684         /*
685          * For delayed items to insert, we track reserved metadata bytes based
686          * on the number of leaves that we will use.
687          * See btrfs_insert_delayed_dir_index() and
688          * btrfs_delayed_item_reserve_metadata()).
689          */
690         ASSERT(first_item->bytes_reserved == 0);
691
692         list_add_tail(&first_item->tree_list, &item_list);
693         batch.total_data_size = first_data_size;
694         batch.nr = 1;
695         total_size = first_data_size + sizeof(struct btrfs_item);
696         curr = first_item;
697
698         while (true) {
699                 int next_size;
700
701                 next = __btrfs_next_delayed_item(curr);
702                 if (!next)
703                         break;
704
705                 /*
706                  * We cannot allow gaps in the key space if we're doing log
707                  * replay.
708                  */
709                 if (continuous_keys_only && (next->index != curr->index + 1))
710                         break;
711
712                 ASSERT(next->bytes_reserved == 0);
713
714                 next_size = next->data_len + sizeof(struct btrfs_item);
715                 if (total_size + next_size > max_size)
716                         break;
717
718                 list_add_tail(&next->tree_list, &item_list);
719                 batch.nr++;
720                 total_size += next_size;
721                 batch.total_data_size += next->data_len;
722                 curr = next;
723         }
724
725         if (batch.nr == 1) {
726                 first_key.objectid = node->inode_id;
727                 first_key.type = BTRFS_DIR_INDEX_KEY;
728                 first_key.offset = first_item->index;
729                 batch.keys = &first_key;
730                 batch.data_sizes = &first_data_size;
731         } else {
732                 struct btrfs_key *ins_keys;
733                 u32 *ins_sizes;
734                 int i = 0;
735
736                 ins_data = kmalloc(batch.nr * sizeof(u32) +
737                                    batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
738                 if (!ins_data) {
739                         ret = -ENOMEM;
740                         goto out;
741                 }
742                 ins_sizes = (u32 *)ins_data;
743                 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
744                 batch.keys = ins_keys;
745                 batch.data_sizes = ins_sizes;
746                 list_for_each_entry(curr, &item_list, tree_list) {
747                         ins_keys[i].objectid = node->inode_id;
748                         ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
749                         ins_keys[i].offset = curr->index;
750                         ins_sizes[i] = curr->data_len;
751                         i++;
752                 }
753         }
754
755         ret = btrfs_insert_empty_items(trans, root, path, &batch);
756         if (ret)
757                 goto out;
758
759         list_for_each_entry(curr, &item_list, tree_list) {
760                 char *data_ptr;
761
762                 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
763                 write_extent_buffer(path->nodes[0], &curr->data,
764                                     (unsigned long)data_ptr, curr->data_len);
765                 path->slots[0]++;
766         }
767
768         /*
769          * Now release our path before releasing the delayed items and their
770          * metadata reservations, so that we don't block other tasks for more
771          * time than needed.
772          */
773         btrfs_release_path(path);
774
775         ASSERT(node->index_item_leaves > 0);
776
777         /*
778          * For normal operations we will batch an entire leaf's worth of delayed
779          * items, so if there are more items to process we can decrement
780          * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
781          *
782          * However for log replay we may not have inserted an entire leaf's
783          * worth of items, we may have not had continuous items, so decrementing
784          * here would mess up the index_item_leaves accounting.  For this case
785          * only clean up the accounting when there are no items left.
786          */
787         if (next && !continuous_keys_only) {
788                 /*
789                  * We inserted one batch of items into a leaf a there are more
790                  * items to flush in a future batch, now release one unit of
791                  * metadata space from the delayed block reserve, corresponding
792                  * the leaf we just flushed to.
793                  */
794                 btrfs_delayed_item_release_leaves(node, 1);
795                 node->index_item_leaves--;
796         } else if (!next) {
797                 /*
798                  * There are no more items to insert. We can have a number of
799                  * reserved leaves > 1 here - this happens when many dir index
800                  * items are added and then removed before they are flushed (file
801                  * names with a very short life, never span a transaction). So
802                  * release all remaining leaves.
803                  */
804                 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
805                 node->index_item_leaves = 0;
806         }
807
808         list_for_each_entry_safe(curr, next, &item_list, tree_list) {
809                 list_del(&curr->tree_list);
810                 btrfs_release_delayed_item(curr);
811         }
812 out:
813         kfree(ins_data);
814         return ret;
815 }
816
817 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
818                                       struct btrfs_path *path,
819                                       struct btrfs_root *root,
820                                       struct btrfs_delayed_node *node)
821 {
822         int ret = 0;
823
824         while (ret == 0) {
825                 struct btrfs_delayed_item *curr;
826
827                 mutex_lock(&node->mutex);
828                 curr = __btrfs_first_delayed_insertion_item(node);
829                 if (!curr) {
830                         mutex_unlock(&node->mutex);
831                         break;
832                 }
833                 ret = btrfs_insert_delayed_item(trans, root, path, curr);
834                 mutex_unlock(&node->mutex);
835         }
836
837         return ret;
838 }
839
840 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
841                                     struct btrfs_root *root,
842                                     struct btrfs_path *path,
843                                     struct btrfs_delayed_item *item)
844 {
845         const u64 ino = item->delayed_node->inode_id;
846         struct btrfs_fs_info *fs_info = root->fs_info;
847         struct btrfs_delayed_item *curr, *next;
848         struct extent_buffer *leaf = path->nodes[0];
849         LIST_HEAD(batch_list);
850         int nitems, slot, last_slot;
851         int ret;
852         u64 total_reserved_size = item->bytes_reserved;
853
854         ASSERT(leaf != NULL);
855
856         slot = path->slots[0];
857         last_slot = btrfs_header_nritems(leaf) - 1;
858         /*
859          * Our caller always gives us a path pointing to an existing item, so
860          * this can not happen.
861          */
862         ASSERT(slot <= last_slot);
863         if (WARN_ON(slot > last_slot))
864                 return -ENOENT;
865
866         nitems = 1;
867         curr = item;
868         list_add_tail(&curr->tree_list, &batch_list);
869
870         /*
871          * Keep checking if the next delayed item matches the next item in the
872          * leaf - if so, we can add it to the batch of items to delete from the
873          * leaf.
874          */
875         while (slot < last_slot) {
876                 struct btrfs_key key;
877
878                 next = __btrfs_next_delayed_item(curr);
879                 if (!next)
880                         break;
881
882                 slot++;
883                 btrfs_item_key_to_cpu(leaf, &key, slot);
884                 if (key.objectid != ino ||
885                     key.type != BTRFS_DIR_INDEX_KEY ||
886                     key.offset != next->index)
887                         break;
888                 nitems++;
889                 curr = next;
890                 list_add_tail(&curr->tree_list, &batch_list);
891                 total_reserved_size += curr->bytes_reserved;
892         }
893
894         ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
895         if (ret)
896                 return ret;
897
898         /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
899         if (total_reserved_size > 0) {
900                 /*
901                  * Check btrfs_delayed_item_reserve_metadata() to see why we
902                  * don't need to release/reserve qgroup space.
903                  */
904                 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
905                                               total_reserved_size, 0);
906                 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
907                                         total_reserved_size, NULL);
908         }
909
910         list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
911                 list_del(&curr->tree_list);
912                 btrfs_release_delayed_item(curr);
913         }
914
915         return 0;
916 }
917
918 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
919                                       struct btrfs_path *path,
920                                       struct btrfs_root *root,
921                                       struct btrfs_delayed_node *node)
922 {
923         struct btrfs_key key;
924         int ret = 0;
925
926         key.objectid = node->inode_id;
927         key.type = BTRFS_DIR_INDEX_KEY;
928
929         while (ret == 0) {
930                 struct btrfs_delayed_item *item;
931
932                 mutex_lock(&node->mutex);
933                 item = __btrfs_first_delayed_deletion_item(node);
934                 if (!item) {
935                         mutex_unlock(&node->mutex);
936                         break;
937                 }
938
939                 key.offset = item->index;
940                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
941                 if (ret > 0) {
942                         /*
943                          * There's no matching item in the leaf. This means we
944                          * have already deleted this item in a past run of the
945                          * delayed items. We ignore errors when running delayed
946                          * items from an async context, through a work queue job
947                          * running btrfs_async_run_delayed_root(), and don't
948                          * release delayed items that failed to complete. This
949                          * is because we will retry later, and at transaction
950                          * commit time we always run delayed items and will
951                          * then deal with errors if they fail to run again.
952                          *
953                          * So just release delayed items for which we can't find
954                          * an item in the tree, and move to the next item.
955                          */
956                         btrfs_release_path(path);
957                         btrfs_release_delayed_item(item);
958                         ret = 0;
959                 } else if (ret == 0) {
960                         ret = btrfs_batch_delete_items(trans, root, path, item);
961                         btrfs_release_path(path);
962                 }
963
964                 /*
965                  * We unlock and relock on each iteration, this is to prevent
966                  * blocking other tasks for too long while we are being run from
967                  * the async context (work queue job). Those tasks are typically
968                  * running system calls like creat/mkdir/rename/unlink/etc which
969                  * need to add delayed items to this delayed node.
970                  */
971                 mutex_unlock(&node->mutex);
972         }
973
974         return ret;
975 }
976
977 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
978 {
979         struct btrfs_delayed_root *delayed_root;
980
981         if (delayed_node &&
982             test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
983                 BUG_ON(!delayed_node->root);
984                 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
985                 delayed_node->count--;
986
987                 delayed_root = delayed_node->root->fs_info->delayed_root;
988                 finish_one_item(delayed_root);
989         }
990 }
991
992 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
993 {
994
995         if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
996                 struct btrfs_delayed_root *delayed_root;
997
998                 ASSERT(delayed_node->root);
999                 delayed_node->count--;
1000
1001                 delayed_root = delayed_node->root->fs_info->delayed_root;
1002                 finish_one_item(delayed_root);
1003         }
1004 }
1005
1006 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1007                                         struct btrfs_root *root,
1008                                         struct btrfs_path *path,
1009                                         struct btrfs_delayed_node *node)
1010 {
1011         struct btrfs_fs_info *fs_info = root->fs_info;
1012         struct btrfs_key key;
1013         struct btrfs_inode_item *inode_item;
1014         struct extent_buffer *leaf;
1015         int mod;
1016         int ret;
1017
1018         key.objectid = node->inode_id;
1019         key.type = BTRFS_INODE_ITEM_KEY;
1020         key.offset = 0;
1021
1022         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1023                 mod = -1;
1024         else
1025                 mod = 1;
1026
1027         ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1028         if (ret > 0)
1029                 ret = -ENOENT;
1030         if (ret < 0)
1031                 goto out;
1032
1033         leaf = path->nodes[0];
1034         inode_item = btrfs_item_ptr(leaf, path->slots[0],
1035                                     struct btrfs_inode_item);
1036         write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1037                             sizeof(struct btrfs_inode_item));
1038         btrfs_mark_buffer_dirty(trans, leaf);
1039
1040         if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1041                 goto out;
1042
1043         /*
1044          * Now we're going to delete the INODE_REF/EXTREF, which should be the
1045          * only one ref left.  Check if the next item is an INODE_REF/EXTREF.
1046          *
1047          * But if we're the last item already, release and search for the last
1048          * INODE_REF/EXTREF.
1049          */
1050         if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1051                 key.objectid = node->inode_id;
1052                 key.type = BTRFS_INODE_EXTREF_KEY;
1053                 key.offset = (u64)-1;
1054
1055                 btrfs_release_path(path);
1056                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1057                 if (ret < 0)
1058                         goto err_out;
1059                 ASSERT(ret > 0);
1060                 ASSERT(path->slots[0] > 0);
1061                 ret = 0;
1062                 path->slots[0]--;
1063                 leaf = path->nodes[0];
1064         } else {
1065                 path->slots[0]++;
1066         }
1067         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1068         if (key.objectid != node->inode_id)
1069                 goto out;
1070         if (key.type != BTRFS_INODE_REF_KEY &&
1071             key.type != BTRFS_INODE_EXTREF_KEY)
1072                 goto out;
1073
1074         /*
1075          * Delayed iref deletion is for the inode who has only one link,
1076          * so there is only one iref. The case that several irefs are
1077          * in the same item doesn't exist.
1078          */
1079         ret = btrfs_del_item(trans, root, path);
1080 out:
1081         btrfs_release_delayed_iref(node);
1082         btrfs_release_path(path);
1083 err_out:
1084         btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1085         btrfs_release_delayed_inode(node);
1086
1087         /*
1088          * If we fail to update the delayed inode we need to abort the
1089          * transaction, because we could leave the inode with the improper
1090          * counts behind.
1091          */
1092         if (ret && ret != -ENOENT)
1093                 btrfs_abort_transaction(trans, ret);
1094
1095         return ret;
1096 }
1097
1098 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1099                                              struct btrfs_root *root,
1100                                              struct btrfs_path *path,
1101                                              struct btrfs_delayed_node *node)
1102 {
1103         int ret;
1104
1105         mutex_lock(&node->mutex);
1106         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1107                 mutex_unlock(&node->mutex);
1108                 return 0;
1109         }
1110
1111         ret = __btrfs_update_delayed_inode(trans, root, path, node);
1112         mutex_unlock(&node->mutex);
1113         return ret;
1114 }
1115
1116 static inline int
1117 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1118                                    struct btrfs_path *path,
1119                                    struct btrfs_delayed_node *node)
1120 {
1121         int ret;
1122
1123         ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1124         if (ret)
1125                 return ret;
1126
1127         ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1128         if (ret)
1129                 return ret;
1130
1131         ret = btrfs_record_root_in_trans(trans, node->root);
1132         if (ret)
1133                 return ret;
1134         ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1135         return ret;
1136 }
1137
1138 /*
1139  * Called when committing the transaction.
1140  * Returns 0 on success.
1141  * Returns < 0 on error and returns with an aborted transaction with any
1142  * outstanding delayed items cleaned up.
1143  */
1144 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1145 {
1146         struct btrfs_fs_info *fs_info = trans->fs_info;
1147         struct btrfs_delayed_root *delayed_root;
1148         struct btrfs_delayed_node *curr_node, *prev_node;
1149         struct btrfs_path *path;
1150         struct btrfs_block_rsv *block_rsv;
1151         int ret = 0;
1152         bool count = (nr > 0);
1153
1154         if (TRANS_ABORTED(trans))
1155                 return -EIO;
1156
1157         path = btrfs_alloc_path();
1158         if (!path)
1159                 return -ENOMEM;
1160
1161         block_rsv = trans->block_rsv;
1162         trans->block_rsv = &fs_info->delayed_block_rsv;
1163
1164         delayed_root = fs_info->delayed_root;
1165
1166         curr_node = btrfs_first_delayed_node(delayed_root);
1167         while (curr_node && (!count || nr--)) {
1168                 ret = __btrfs_commit_inode_delayed_items(trans, path,
1169                                                          curr_node);
1170                 if (ret) {
1171                         btrfs_abort_transaction(trans, ret);
1172                         break;
1173                 }
1174
1175                 prev_node = curr_node;
1176                 curr_node = btrfs_next_delayed_node(curr_node);
1177                 /*
1178                  * See the comment below about releasing path before releasing
1179                  * node. If the commit of delayed items was successful the path
1180                  * should always be released, but in case of an error, it may
1181                  * point to locked extent buffers (a leaf at the very least).
1182                  */
1183                 ASSERT(path->nodes[0] == NULL);
1184                 btrfs_release_delayed_node(prev_node);
1185         }
1186
1187         /*
1188          * Release the path to avoid a potential deadlock and lockdep splat when
1189          * releasing the delayed node, as that requires taking the delayed node's
1190          * mutex. If another task starts running delayed items before we take
1191          * the mutex, it will first lock the mutex and then it may try to lock
1192          * the same btree path (leaf).
1193          */
1194         btrfs_free_path(path);
1195
1196         if (curr_node)
1197                 btrfs_release_delayed_node(curr_node);
1198         trans->block_rsv = block_rsv;
1199
1200         return ret;
1201 }
1202
1203 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1204 {
1205         return __btrfs_run_delayed_items(trans, -1);
1206 }
1207
1208 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1209 {
1210         return __btrfs_run_delayed_items(trans, nr);
1211 }
1212
1213 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1214                                      struct btrfs_inode *inode)
1215 {
1216         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1217         struct btrfs_path *path;
1218         struct btrfs_block_rsv *block_rsv;
1219         int ret;
1220
1221         if (!delayed_node)
1222                 return 0;
1223
1224         mutex_lock(&delayed_node->mutex);
1225         if (!delayed_node->count) {
1226                 mutex_unlock(&delayed_node->mutex);
1227                 btrfs_release_delayed_node(delayed_node);
1228                 return 0;
1229         }
1230         mutex_unlock(&delayed_node->mutex);
1231
1232         path = btrfs_alloc_path();
1233         if (!path) {
1234                 btrfs_release_delayed_node(delayed_node);
1235                 return -ENOMEM;
1236         }
1237
1238         block_rsv = trans->block_rsv;
1239         trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1240
1241         ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1242
1243         btrfs_release_delayed_node(delayed_node);
1244         btrfs_free_path(path);
1245         trans->block_rsv = block_rsv;
1246
1247         return ret;
1248 }
1249
1250 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1251 {
1252         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1253         struct btrfs_trans_handle *trans;
1254         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1255         struct btrfs_path *path;
1256         struct btrfs_block_rsv *block_rsv;
1257         int ret;
1258
1259         if (!delayed_node)
1260                 return 0;
1261
1262         mutex_lock(&delayed_node->mutex);
1263         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1264                 mutex_unlock(&delayed_node->mutex);
1265                 btrfs_release_delayed_node(delayed_node);
1266                 return 0;
1267         }
1268         mutex_unlock(&delayed_node->mutex);
1269
1270         trans = btrfs_join_transaction(delayed_node->root);
1271         if (IS_ERR(trans)) {
1272                 ret = PTR_ERR(trans);
1273                 goto out;
1274         }
1275
1276         path = btrfs_alloc_path();
1277         if (!path) {
1278                 ret = -ENOMEM;
1279                 goto trans_out;
1280         }
1281
1282         block_rsv = trans->block_rsv;
1283         trans->block_rsv = &fs_info->delayed_block_rsv;
1284
1285         mutex_lock(&delayed_node->mutex);
1286         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1287                 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1288                                                    path, delayed_node);
1289         else
1290                 ret = 0;
1291         mutex_unlock(&delayed_node->mutex);
1292
1293         btrfs_free_path(path);
1294         trans->block_rsv = block_rsv;
1295 trans_out:
1296         btrfs_end_transaction(trans);
1297         btrfs_btree_balance_dirty(fs_info);
1298 out:
1299         btrfs_release_delayed_node(delayed_node);
1300
1301         return ret;
1302 }
1303
1304 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1305 {
1306         struct btrfs_delayed_node *delayed_node;
1307
1308         delayed_node = READ_ONCE(inode->delayed_node);
1309         if (!delayed_node)
1310                 return;
1311
1312         inode->delayed_node = NULL;
1313         btrfs_release_delayed_node(delayed_node);
1314 }
1315
1316 struct btrfs_async_delayed_work {
1317         struct btrfs_delayed_root *delayed_root;
1318         int nr;
1319         struct btrfs_work work;
1320 };
1321
1322 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1323 {
1324         struct btrfs_async_delayed_work *async_work;
1325         struct btrfs_delayed_root *delayed_root;
1326         struct btrfs_trans_handle *trans;
1327         struct btrfs_path *path;
1328         struct btrfs_delayed_node *delayed_node = NULL;
1329         struct btrfs_root *root;
1330         struct btrfs_block_rsv *block_rsv;
1331         int total_done = 0;
1332
1333         async_work = container_of(work, struct btrfs_async_delayed_work, work);
1334         delayed_root = async_work->delayed_root;
1335
1336         path = btrfs_alloc_path();
1337         if (!path)
1338                 goto out;
1339
1340         do {
1341                 if (atomic_read(&delayed_root->items) <
1342                     BTRFS_DELAYED_BACKGROUND / 2)
1343                         break;
1344
1345                 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1346                 if (!delayed_node)
1347                         break;
1348
1349                 root = delayed_node->root;
1350
1351                 trans = btrfs_join_transaction(root);
1352                 if (IS_ERR(trans)) {
1353                         btrfs_release_path(path);
1354                         btrfs_release_prepared_delayed_node(delayed_node);
1355                         total_done++;
1356                         continue;
1357                 }
1358
1359                 block_rsv = trans->block_rsv;
1360                 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1361
1362                 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1363
1364                 trans->block_rsv = block_rsv;
1365                 btrfs_end_transaction(trans);
1366                 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1367
1368                 btrfs_release_path(path);
1369                 btrfs_release_prepared_delayed_node(delayed_node);
1370                 total_done++;
1371
1372         } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1373                  || total_done < async_work->nr);
1374
1375         btrfs_free_path(path);
1376 out:
1377         wake_up(&delayed_root->wait);
1378         kfree(async_work);
1379 }
1380
1381
1382 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1383                                      struct btrfs_fs_info *fs_info, int nr)
1384 {
1385         struct btrfs_async_delayed_work *async_work;
1386
1387         async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1388         if (!async_work)
1389                 return -ENOMEM;
1390
1391         async_work->delayed_root = delayed_root;
1392         btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1393         async_work->nr = nr;
1394
1395         btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1396         return 0;
1397 }
1398
1399 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1400 {
1401         WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1402 }
1403
1404 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1405 {
1406         int val = atomic_read(&delayed_root->items_seq);
1407
1408         if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1409                 return 1;
1410
1411         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1412                 return 1;
1413
1414         return 0;
1415 }
1416
1417 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1418 {
1419         struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1420
1421         if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1422                 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1423                 return;
1424
1425         if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1426                 int seq;
1427                 int ret;
1428
1429                 seq = atomic_read(&delayed_root->items_seq);
1430
1431                 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1432                 if (ret)
1433                         return;
1434
1435                 wait_event_interruptible(delayed_root->wait,
1436                                          could_end_wait(delayed_root, seq));
1437                 return;
1438         }
1439
1440         btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1441 }
1442
1443 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1444 {
1445         struct btrfs_fs_info *fs_info = trans->fs_info;
1446         const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1447
1448         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1449                 return;
1450
1451         /*
1452          * Adding the new dir index item does not require touching another
1453          * leaf, so we can release 1 unit of metadata that was previously
1454          * reserved when starting the transaction. This applies only to
1455          * the case where we had a transaction start and excludes the
1456          * transaction join case (when replaying log trees).
1457          */
1458         trace_btrfs_space_reservation(fs_info, "transaction",
1459                                       trans->transid, bytes, 0);
1460         btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1461         ASSERT(trans->bytes_reserved >= bytes);
1462         trans->bytes_reserved -= bytes;
1463 }
1464
1465 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1466 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1467                                    const char *name, int name_len,
1468                                    struct btrfs_inode *dir,
1469                                    struct btrfs_disk_key *disk_key, u8 flags,
1470                                    u64 index)
1471 {
1472         struct btrfs_fs_info *fs_info = trans->fs_info;
1473         const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1474         struct btrfs_delayed_node *delayed_node;
1475         struct btrfs_delayed_item *delayed_item;
1476         struct btrfs_dir_item *dir_item;
1477         bool reserve_leaf_space;
1478         u32 data_len;
1479         int ret;
1480
1481         delayed_node = btrfs_get_or_create_delayed_node(dir);
1482         if (IS_ERR(delayed_node))
1483                 return PTR_ERR(delayed_node);
1484
1485         delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1486                                                 delayed_node,
1487                                                 BTRFS_DELAYED_INSERTION_ITEM);
1488         if (!delayed_item) {
1489                 ret = -ENOMEM;
1490                 goto release_node;
1491         }
1492
1493         delayed_item->index = index;
1494
1495         dir_item = (struct btrfs_dir_item *)delayed_item->data;
1496         dir_item->location = *disk_key;
1497         btrfs_set_stack_dir_transid(dir_item, trans->transid);
1498         btrfs_set_stack_dir_data_len(dir_item, 0);
1499         btrfs_set_stack_dir_name_len(dir_item, name_len);
1500         btrfs_set_stack_dir_flags(dir_item, flags);
1501         memcpy((char *)(dir_item + 1), name, name_len);
1502
1503         data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1504
1505         mutex_lock(&delayed_node->mutex);
1506
1507         /*
1508          * First attempt to insert the delayed item. This is to make the error
1509          * handling path simpler in case we fail (-EEXIST). There's no risk of
1510          * any other task coming in and running the delayed item before we do
1511          * the metadata space reservation below, because we are holding the
1512          * delayed node's mutex and that mutex must also be locked before the
1513          * node's delayed items can be run.
1514          */
1515         ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1516         if (unlikely(ret)) {
1517                 btrfs_err(trans->fs_info,
1518 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1519                           name_len, name, index, btrfs_root_id(delayed_node->root),
1520                           delayed_node->inode_id, dir->index_cnt,
1521                           delayed_node->index_cnt, ret);
1522                 btrfs_release_delayed_item(delayed_item);
1523                 btrfs_release_dir_index_item_space(trans);
1524                 mutex_unlock(&delayed_node->mutex);
1525                 goto release_node;
1526         }
1527
1528         if (delayed_node->index_item_leaves == 0 ||
1529             delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1530                 delayed_node->curr_index_batch_size = data_len;
1531                 reserve_leaf_space = true;
1532         } else {
1533                 delayed_node->curr_index_batch_size += data_len;
1534                 reserve_leaf_space = false;
1535         }
1536
1537         if (reserve_leaf_space) {
1538                 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1539                 /*
1540                  * Space was reserved for a dir index item insertion when we
1541                  * started the transaction, so getting a failure here should be
1542                  * impossible.
1543                  */
1544                 if (WARN_ON(ret)) {
1545                         btrfs_release_delayed_item(delayed_item);
1546                         mutex_unlock(&delayed_node->mutex);
1547                         goto release_node;
1548                 }
1549
1550                 delayed_node->index_item_leaves++;
1551         } else {
1552                 btrfs_release_dir_index_item_space(trans);
1553         }
1554         mutex_unlock(&delayed_node->mutex);
1555
1556 release_node:
1557         btrfs_release_delayed_node(delayed_node);
1558         return ret;
1559 }
1560
1561 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1562                                                struct btrfs_delayed_node *node,
1563                                                u64 index)
1564 {
1565         struct btrfs_delayed_item *item;
1566
1567         mutex_lock(&node->mutex);
1568         item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1569         if (!item) {
1570                 mutex_unlock(&node->mutex);
1571                 return 1;
1572         }
1573
1574         /*
1575          * For delayed items to insert, we track reserved metadata bytes based
1576          * on the number of leaves that we will use.
1577          * See btrfs_insert_delayed_dir_index() and
1578          * btrfs_delayed_item_reserve_metadata()).
1579          */
1580         ASSERT(item->bytes_reserved == 0);
1581         ASSERT(node->index_item_leaves > 0);
1582
1583         /*
1584          * If there's only one leaf reserved, we can decrement this item from the
1585          * current batch, otherwise we can not because we don't know which leaf
1586          * it belongs to. With the current limit on delayed items, we rarely
1587          * accumulate enough dir index items to fill more than one leaf (even
1588          * when using a leaf size of 4K).
1589          */
1590         if (node->index_item_leaves == 1) {
1591                 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1592
1593                 ASSERT(node->curr_index_batch_size >= data_len);
1594                 node->curr_index_batch_size -= data_len;
1595         }
1596
1597         btrfs_release_delayed_item(item);
1598
1599         /* If we now have no more dir index items, we can release all leaves. */
1600         if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1601                 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1602                 node->index_item_leaves = 0;
1603         }
1604
1605         mutex_unlock(&node->mutex);
1606         return 0;
1607 }
1608
1609 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1610                                    struct btrfs_inode *dir, u64 index)
1611 {
1612         struct btrfs_delayed_node *node;
1613         struct btrfs_delayed_item *item;
1614         int ret;
1615
1616         node = btrfs_get_or_create_delayed_node(dir);
1617         if (IS_ERR(node))
1618                 return PTR_ERR(node);
1619
1620         ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1621         if (!ret)
1622                 goto end;
1623
1624         item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1625         if (!item) {
1626                 ret = -ENOMEM;
1627                 goto end;
1628         }
1629
1630         item->index = index;
1631
1632         ret = btrfs_delayed_item_reserve_metadata(trans, item);
1633         /*
1634          * we have reserved enough space when we start a new transaction,
1635          * so reserving metadata failure is impossible.
1636          */
1637         if (ret < 0) {
1638                 btrfs_err(trans->fs_info,
1639 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1640                 btrfs_release_delayed_item(item);
1641                 goto end;
1642         }
1643
1644         mutex_lock(&node->mutex);
1645         ret = __btrfs_add_delayed_item(node, item);
1646         if (unlikely(ret)) {
1647                 btrfs_err(trans->fs_info,
1648                           "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1649                           index, node->root->root_key.objectid,
1650                           node->inode_id, ret);
1651                 btrfs_delayed_item_release_metadata(dir->root, item);
1652                 btrfs_release_delayed_item(item);
1653         }
1654         mutex_unlock(&node->mutex);
1655 end:
1656         btrfs_release_delayed_node(node);
1657         return ret;
1658 }
1659
1660 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1661 {
1662         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1663
1664         if (!delayed_node)
1665                 return -ENOENT;
1666
1667         /*
1668          * Since we have held i_mutex of this directory, it is impossible that
1669          * a new directory index is added into the delayed node and index_cnt
1670          * is updated now. So we needn't lock the delayed node.
1671          */
1672         if (!delayed_node->index_cnt) {
1673                 btrfs_release_delayed_node(delayed_node);
1674                 return -EINVAL;
1675         }
1676
1677         inode->index_cnt = delayed_node->index_cnt;
1678         btrfs_release_delayed_node(delayed_node);
1679         return 0;
1680 }
1681
1682 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1683                                      u64 last_index,
1684                                      struct list_head *ins_list,
1685                                      struct list_head *del_list)
1686 {
1687         struct btrfs_delayed_node *delayed_node;
1688         struct btrfs_delayed_item *item;
1689
1690         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1691         if (!delayed_node)
1692                 return false;
1693
1694         /*
1695          * We can only do one readdir with delayed items at a time because of
1696          * item->readdir_list.
1697          */
1698         btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
1699         btrfs_inode_lock(BTRFS_I(inode), 0);
1700
1701         mutex_lock(&delayed_node->mutex);
1702         item = __btrfs_first_delayed_insertion_item(delayed_node);
1703         while (item && item->index <= last_index) {
1704                 refcount_inc(&item->refs);
1705                 list_add_tail(&item->readdir_list, ins_list);
1706                 item = __btrfs_next_delayed_item(item);
1707         }
1708
1709         item = __btrfs_first_delayed_deletion_item(delayed_node);
1710         while (item && item->index <= last_index) {
1711                 refcount_inc(&item->refs);
1712                 list_add_tail(&item->readdir_list, del_list);
1713                 item = __btrfs_next_delayed_item(item);
1714         }
1715         mutex_unlock(&delayed_node->mutex);
1716         /*
1717          * This delayed node is still cached in the btrfs inode, so refs
1718          * must be > 1 now, and we needn't check it is going to be freed
1719          * or not.
1720          *
1721          * Besides that, this function is used to read dir, we do not
1722          * insert/delete delayed items in this period. So we also needn't
1723          * requeue or dequeue this delayed node.
1724          */
1725         refcount_dec(&delayed_node->refs);
1726
1727         return true;
1728 }
1729
1730 void btrfs_readdir_put_delayed_items(struct inode *inode,
1731                                      struct list_head *ins_list,
1732                                      struct list_head *del_list)
1733 {
1734         struct btrfs_delayed_item *curr, *next;
1735
1736         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1737                 list_del(&curr->readdir_list);
1738                 if (refcount_dec_and_test(&curr->refs))
1739                         kfree(curr);
1740         }
1741
1742         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1743                 list_del(&curr->readdir_list);
1744                 if (refcount_dec_and_test(&curr->refs))
1745                         kfree(curr);
1746         }
1747
1748         /*
1749          * The VFS is going to do up_read(), so we need to downgrade back to a
1750          * read lock.
1751          */
1752         downgrade_write(&inode->i_rwsem);
1753 }
1754
1755 int btrfs_should_delete_dir_index(struct list_head *del_list,
1756                                   u64 index)
1757 {
1758         struct btrfs_delayed_item *curr;
1759         int ret = 0;
1760
1761         list_for_each_entry(curr, del_list, readdir_list) {
1762                 if (curr->index > index)
1763                         break;
1764                 if (curr->index == index) {
1765                         ret = 1;
1766                         break;
1767                 }
1768         }
1769         return ret;
1770 }
1771
1772 /*
1773  * Read dir info stored in the delayed tree.
1774  */
1775 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1776                                     struct list_head *ins_list)
1777 {
1778         struct btrfs_dir_item *di;
1779         struct btrfs_delayed_item *curr, *next;
1780         struct btrfs_key location;
1781         char *name;
1782         int name_len;
1783         int over = 0;
1784         unsigned char d_type;
1785
1786         /*
1787          * Changing the data of the delayed item is impossible. So
1788          * we needn't lock them. And we have held i_mutex of the
1789          * directory, nobody can delete any directory indexes now.
1790          */
1791         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1792                 list_del(&curr->readdir_list);
1793
1794                 if (curr->index < ctx->pos) {
1795                         if (refcount_dec_and_test(&curr->refs))
1796                                 kfree(curr);
1797                         continue;
1798                 }
1799
1800                 ctx->pos = curr->index;
1801
1802                 di = (struct btrfs_dir_item *)curr->data;
1803                 name = (char *)(di + 1);
1804                 name_len = btrfs_stack_dir_name_len(di);
1805
1806                 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1807                 btrfs_disk_key_to_cpu(&location, &di->location);
1808
1809                 over = !dir_emit(ctx, name, name_len,
1810                                location.objectid, d_type);
1811
1812                 if (refcount_dec_and_test(&curr->refs))
1813                         kfree(curr);
1814
1815                 if (over)
1816                         return 1;
1817                 ctx->pos++;
1818         }
1819         return 0;
1820 }
1821
1822 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1823                                   struct btrfs_inode_item *inode_item,
1824                                   struct inode *inode)
1825 {
1826         u64 flags;
1827
1828         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1829         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1830         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1831         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1832         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1833         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1834         btrfs_set_stack_inode_generation(inode_item,
1835                                          BTRFS_I(inode)->generation);
1836         btrfs_set_stack_inode_sequence(inode_item,
1837                                        inode_peek_iversion(inode));
1838         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1839         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1840         flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1841                                           BTRFS_I(inode)->ro_flags);
1842         btrfs_set_stack_inode_flags(inode_item, flags);
1843         btrfs_set_stack_inode_block_group(inode_item, 0);
1844
1845         btrfs_set_stack_timespec_sec(&inode_item->atime,
1846                                      inode_get_atime_sec(inode));
1847         btrfs_set_stack_timespec_nsec(&inode_item->atime,
1848                                       inode_get_atime_nsec(inode));
1849
1850         btrfs_set_stack_timespec_sec(&inode_item->mtime,
1851                                      inode_get_mtime_sec(inode));
1852         btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1853                                       inode_get_mtime_nsec(inode));
1854
1855         btrfs_set_stack_timespec_sec(&inode_item->ctime,
1856                                      inode_get_ctime_sec(inode));
1857         btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1858                                       inode_get_ctime_nsec(inode));
1859
1860         btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1861         btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
1862 }
1863
1864 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1865 {
1866         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1867         struct btrfs_delayed_node *delayed_node;
1868         struct btrfs_inode_item *inode_item;
1869
1870         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1871         if (!delayed_node)
1872                 return -ENOENT;
1873
1874         mutex_lock(&delayed_node->mutex);
1875         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1876                 mutex_unlock(&delayed_node->mutex);
1877                 btrfs_release_delayed_node(delayed_node);
1878                 return -ENOENT;
1879         }
1880
1881         inode_item = &delayed_node->inode_item;
1882
1883         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1884         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1885         btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1886         btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1887                         round_up(i_size_read(inode), fs_info->sectorsize));
1888         inode->i_mode = btrfs_stack_inode_mode(inode_item);
1889         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1890         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1891         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1892         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1893
1894         inode_set_iversion_queried(inode,
1895                                    btrfs_stack_inode_sequence(inode_item));
1896         inode->i_rdev = 0;
1897         *rdev = btrfs_stack_inode_rdev(inode_item);
1898         btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1899                                 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1900
1901         inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1902                         btrfs_stack_timespec_nsec(&inode_item->atime));
1903
1904         inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1905                         btrfs_stack_timespec_nsec(&inode_item->mtime));
1906
1907         inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1908                         btrfs_stack_timespec_nsec(&inode_item->ctime));
1909
1910         BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1911         BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1912
1913         inode->i_generation = BTRFS_I(inode)->generation;
1914         BTRFS_I(inode)->index_cnt = (u64)-1;
1915
1916         mutex_unlock(&delayed_node->mutex);
1917         btrfs_release_delayed_node(delayed_node);
1918         return 0;
1919 }
1920
1921 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1922                                struct btrfs_inode *inode)
1923 {
1924         struct btrfs_root *root = inode->root;
1925         struct btrfs_delayed_node *delayed_node;
1926         int ret = 0;
1927
1928         delayed_node = btrfs_get_or_create_delayed_node(inode);
1929         if (IS_ERR(delayed_node))
1930                 return PTR_ERR(delayed_node);
1931
1932         mutex_lock(&delayed_node->mutex);
1933         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1934                 fill_stack_inode_item(trans, &delayed_node->inode_item,
1935                                       &inode->vfs_inode);
1936                 goto release_node;
1937         }
1938
1939         ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1940         if (ret)
1941                 goto release_node;
1942
1943         fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1944         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1945         delayed_node->count++;
1946         atomic_inc(&root->fs_info->delayed_root->items);
1947 release_node:
1948         mutex_unlock(&delayed_node->mutex);
1949         btrfs_release_delayed_node(delayed_node);
1950         return ret;
1951 }
1952
1953 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1954 {
1955         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1956         struct btrfs_delayed_node *delayed_node;
1957
1958         /*
1959          * we don't do delayed inode updates during log recovery because it
1960          * leads to enospc problems.  This means we also can't do
1961          * delayed inode refs
1962          */
1963         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1964                 return -EAGAIN;
1965
1966         delayed_node = btrfs_get_or_create_delayed_node(inode);
1967         if (IS_ERR(delayed_node))
1968                 return PTR_ERR(delayed_node);
1969
1970         /*
1971          * We don't reserve space for inode ref deletion is because:
1972          * - We ONLY do async inode ref deletion for the inode who has only
1973          *   one link(i_nlink == 1), it means there is only one inode ref.
1974          *   And in most case, the inode ref and the inode item are in the
1975          *   same leaf, and we will deal with them at the same time.
1976          *   Since we are sure we will reserve the space for the inode item,
1977          *   it is unnecessary to reserve space for inode ref deletion.
1978          * - If the inode ref and the inode item are not in the same leaf,
1979          *   We also needn't worry about enospc problem, because we reserve
1980          *   much more space for the inode update than it needs.
1981          * - At the worst, we can steal some space from the global reservation.
1982          *   It is very rare.
1983          */
1984         mutex_lock(&delayed_node->mutex);
1985         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1986                 goto release_node;
1987
1988         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1989         delayed_node->count++;
1990         atomic_inc(&fs_info->delayed_root->items);
1991 release_node:
1992         mutex_unlock(&delayed_node->mutex);
1993         btrfs_release_delayed_node(delayed_node);
1994         return 0;
1995 }
1996
1997 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1998 {
1999         struct btrfs_root *root = delayed_node->root;
2000         struct btrfs_fs_info *fs_info = root->fs_info;
2001         struct btrfs_delayed_item *curr_item, *prev_item;
2002
2003         mutex_lock(&delayed_node->mutex);
2004         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2005         while (curr_item) {
2006                 prev_item = curr_item;
2007                 curr_item = __btrfs_next_delayed_item(prev_item);
2008                 btrfs_release_delayed_item(prev_item);
2009         }
2010
2011         if (delayed_node->index_item_leaves > 0) {
2012                 btrfs_delayed_item_release_leaves(delayed_node,
2013                                           delayed_node->index_item_leaves);
2014                 delayed_node->index_item_leaves = 0;
2015         }
2016
2017         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2018         while (curr_item) {
2019                 btrfs_delayed_item_release_metadata(root, curr_item);
2020                 prev_item = curr_item;
2021                 curr_item = __btrfs_next_delayed_item(prev_item);
2022                 btrfs_release_delayed_item(prev_item);
2023         }
2024
2025         btrfs_release_delayed_iref(delayed_node);
2026
2027         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2028                 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2029                 btrfs_release_delayed_inode(delayed_node);
2030         }
2031         mutex_unlock(&delayed_node->mutex);
2032 }
2033
2034 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2035 {
2036         struct btrfs_delayed_node *delayed_node;
2037
2038         delayed_node = btrfs_get_delayed_node(inode);
2039         if (!delayed_node)
2040                 return;
2041
2042         __btrfs_kill_delayed_node(delayed_node);
2043         btrfs_release_delayed_node(delayed_node);
2044 }
2045
2046 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2047 {
2048         unsigned long index = 0;
2049         struct btrfs_delayed_node *delayed_nodes[8];
2050
2051         while (1) {
2052                 struct btrfs_delayed_node *node;
2053                 int count;
2054
2055                 spin_lock(&root->inode_lock);
2056                 if (xa_empty(&root->delayed_nodes)) {
2057                         spin_unlock(&root->inode_lock);
2058                         return;
2059                 }
2060
2061                 count = 0;
2062                 xa_for_each_start(&root->delayed_nodes, index, node, index) {
2063                         /*
2064                          * Don't increase refs in case the node is dead and
2065                          * about to be removed from the tree in the loop below
2066                          */
2067                         if (refcount_inc_not_zero(&node->refs)) {
2068                                 delayed_nodes[count] = node;
2069                                 count++;
2070                         }
2071                         if (count >= ARRAY_SIZE(delayed_nodes))
2072                                 break;
2073                 }
2074                 spin_unlock(&root->inode_lock);
2075                 index++;
2076
2077                 for (int i = 0; i < count; i++) {
2078                         __btrfs_kill_delayed_node(delayed_nodes[i]);
2079                         btrfs_release_delayed_node(delayed_nodes[i]);
2080                 }
2081         }
2082 }
2083
2084 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2085 {
2086         struct btrfs_delayed_node *curr_node, *prev_node;
2087
2088         curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2089         while (curr_node) {
2090                 __btrfs_kill_delayed_node(curr_node);
2091
2092                 prev_node = curr_node;
2093                 curr_node = btrfs_next_delayed_node(curr_node);
2094                 btrfs_release_delayed_node(prev_node);
2095         }
2096 }
2097
2098 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2099                                  struct list_head *ins_list,
2100                                  struct list_head *del_list)
2101 {
2102         struct btrfs_delayed_node *node;
2103         struct btrfs_delayed_item *item;
2104
2105         node = btrfs_get_delayed_node(inode);
2106         if (!node)
2107                 return;
2108
2109         mutex_lock(&node->mutex);
2110         item = __btrfs_first_delayed_insertion_item(node);
2111         while (item) {
2112                 /*
2113                  * It's possible that the item is already in a log list. This
2114                  * can happen in case two tasks are trying to log the same
2115                  * directory. For example if we have tasks A and task B:
2116                  *
2117                  * Task A collected the delayed items into a log list while
2118                  * under the inode's log_mutex (at btrfs_log_inode()), but it
2119                  * only releases the items after logging the inodes they point
2120                  * to (if they are new inodes), which happens after unlocking
2121                  * the log mutex;
2122                  *
2123                  * Task B enters btrfs_log_inode() and acquires the log_mutex
2124                  * of the same directory inode, before task B releases the
2125                  * delayed items. This can happen for example when logging some
2126                  * inode we need to trigger logging of its parent directory, so
2127                  * logging two files that have the same parent directory can
2128                  * lead to this.
2129                  *
2130                  * If this happens, just ignore delayed items already in a log
2131                  * list. All the tasks logging the directory are under a log
2132                  * transaction and whichever finishes first can not sync the log
2133                  * before the other completes and leaves the log transaction.
2134                  */
2135                 if (!item->logged && list_empty(&item->log_list)) {
2136                         refcount_inc(&item->refs);
2137                         list_add_tail(&item->log_list, ins_list);
2138                 }
2139                 item = __btrfs_next_delayed_item(item);
2140         }
2141
2142         item = __btrfs_first_delayed_deletion_item(node);
2143         while (item) {
2144                 /* It may be non-empty, for the same reason mentioned above. */
2145                 if (!item->logged && list_empty(&item->log_list)) {
2146                         refcount_inc(&item->refs);
2147                         list_add_tail(&item->log_list, del_list);
2148                 }
2149                 item = __btrfs_next_delayed_item(item);
2150         }
2151         mutex_unlock(&node->mutex);
2152
2153         /*
2154          * We are called during inode logging, which means the inode is in use
2155          * and can not be evicted before we finish logging the inode. So we never
2156          * have the last reference on the delayed inode.
2157          * Also, we don't use btrfs_release_delayed_node() because that would
2158          * requeue the delayed inode (change its order in the list of prepared
2159          * nodes) and we don't want to do such change because we don't create or
2160          * delete delayed items.
2161          */
2162         ASSERT(refcount_read(&node->refs) > 1);
2163         refcount_dec(&node->refs);
2164 }
2165
2166 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2167                                  struct list_head *ins_list,
2168                                  struct list_head *del_list)
2169 {
2170         struct btrfs_delayed_node *node;
2171         struct btrfs_delayed_item *item;
2172         struct btrfs_delayed_item *next;
2173
2174         node = btrfs_get_delayed_node(inode);
2175         if (!node)
2176                 return;
2177
2178         mutex_lock(&node->mutex);
2179
2180         list_for_each_entry_safe(item, next, ins_list, log_list) {
2181                 item->logged = true;
2182                 list_del_init(&item->log_list);
2183                 if (refcount_dec_and_test(&item->refs))
2184                         kfree(item);
2185         }
2186
2187         list_for_each_entry_safe(item, next, del_list, log_list) {
2188                 item->logged = true;
2189                 list_del_init(&item->log_list);
2190                 if (refcount_dec_and_test(&item->refs))
2191                         kfree(item);
2192         }
2193
2194         mutex_unlock(&node->mutex);
2195
2196         /*
2197          * We are called during inode logging, which means the inode is in use
2198          * and can not be evicted before we finish logging the inode. So we never
2199          * have the last reference on the delayed inode.
2200          * Also, we don't use btrfs_release_delayed_node() because that would
2201          * requeue the delayed inode (change its order in the list of prepared
2202          * nodes) and we don't want to do such change because we don't create or
2203          * delete delayed items.
2204          */
2205         ASSERT(refcount_read(&node->refs) > 1);
2206         refcount_dec(&node->refs);
2207 }