/* fs/btrfs/delayed-inode.c (GNU Linux-libre 6.0.2-gnu) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"

#define BTRFS_DELAYED_WRITEBACK         512
#define BTRFS_DELAYED_BACKGROUND        128
#define BTRFS_DELAYED_BATCH             16
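
/*
 * A rough sketch of how these thresholds cooperate (see finish_one_item()
 * and btrfs_balance_delayed_items() below): once the number of delayed
 * items crosses BTRFS_DELAYED_BACKGROUND, background flush work is kicked
 * off; at BTRFS_DELAYED_WRITEBACK and above, the task adding items also
 * waits for the backlog to shrink. Waiters are woken roughly once per
 * BTRFS_DELAYED_BATCH completed items rather than on every single item.
 */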

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
        kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT_CACHED;
        delayed_node->del_root = RB_ROOT_CACHED;
        mutex_init(&delayed_node->mutex);
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        struct btrfs_delayed_node *node;

        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
                refcount_inc(&node->refs);
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);      /* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }

                /*
                 * It's possible that we're racing into the middle of removing
                 * this node from the radix tree.  In this case, the refcount
                 * was zero and it should never go back to one.  Just return
                 * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
                 * refcount once it has hit zero.  If we don't do this dance
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
                 * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
                if (refcount_inc_not_zero(&node->refs)) {
                        refcount_inc(&node->refs);
                        btrfs_inode->delayed_node = node;
                } else {
                        node = NULL;
                }

                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;

again:
        node = btrfs_get_delayed_node(btrfs_inode);
        if (node)
                return node;

        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        /* cached in the btrfs inode and can be accessed */
        refcount_set(&node->refs, 2);

        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, node);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}
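
/*
 * An illustrative sketch of the typical lifetime of a reference obtained
 * here, based on btrfs_insert_delayed_dir_index() below (not a fixed
 * recipe):
 *
 *        node = btrfs_get_or_create_delayed_node(dir);
 *        if (IS_ERR(node))
 *                return PTR_ERR(node);
 *        mutex_lock(&node->mutex);
 *        ... queue insertion or deletion items on the node ...
 *        mutex_unlock(&node->mutex);
 *        btrfs_release_delayed_node(node);
 *
 * The second reference, cached in the btrfs_inode, lives on until
 * btrfs_remove_delayed_node() drops it when the inode is torn down.
 */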

/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, also add this node to the prepare list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                refcount_inc(&node->refs);      /* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
                refcount_dec(&node->refs);      /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;

                spin_lock(&root->inode_lock);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
                radix_tree_delete(&root->delayed_nodes_tree,
                                  delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;

        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
                refcount_set(&item->refs, 1);
        }
        return item;
}
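
/*
 * Note that the item's payload lives right after the struct, in the
 * trailing "data" buffer allocated above (sizeof(*item) + data_len).
 * For a dir index insertion, for example, it holds a struct
 * btrfs_dir_item followed by the file name; see
 * btrfs_insert_delayed_dir_index().
 */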

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:         the rbtree root of the delayed node to search
 * @key:          the key to look up
 * @prev:         used to store the prev item if the right item isn't found
 * @next:         used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
                                           NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root_cached *root;
        struct btrfs_delayed_item *item;
        int cmp;
        bool leftmost = true;

        if (ins->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (ins->ins_or_del == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_root.rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                 rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else if (cmp > 0) {
                        p = &(*p)->rb_left;
                } else {
                        return -EEXIST;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        ins->delayed_node = delayed_node;

        /* Delayed items are always for dir index items. */
        ASSERT(ins->key.type == BTRFS_DIR_INDEX_KEY);

        if (ins->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
        int seq = atomic_inc_return(&delayed_root->items_seq);

        /* atomic_dec_return implies a barrier */
        if (atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)
                cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root_cached *root;
        struct btrfs_delayed_root *delayed_root;

        /* Not associated with any delayed_node */
        if (!delayed_item->delayed_node)
                return;
        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase_cached(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;

        finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        /*
         * Here we migrate the space rsv from the transaction rsv, since we
         * have already reserved space when starting the transaction.  So
         * there is no need to reserve qgroup space here.
         */
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->key.objectid,
                                              num_bytes, 1);
                /*
                 * For insertions we track reserved metadata space by accounting
                 * for the number of leaves that will be used, based on the
                 * delayed node's index_item_leaves field.
                 */
                if (item->ins_or_del == BTRFS_DELAYED_DELETION_ITEM)
                        item->bytes_reserved = num_bytes;
        }

        return ret;
}
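
/*
 * Note the asymmetry above: only deletion items record bytes_reserved on
 * the item itself. Insertion items are instead accounted per leaf through
 * the delayed node's index_item_leaves counter, reserved in
 * btrfs_insert_delayed_dir_index() and released in
 * btrfs_delayed_item_release_leaves().
 */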

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!item->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        /*
         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
         * to release/reserve qgroup space.
         */
        trace_btrfs_space_reservation(fs_info, "delayed_item",
                                      item->key.objectid, item->bytes_reserved,
                                      0);
        btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
                                              unsigned int num_leaves)
{
        struct btrfs_fs_info *fs_info = node->root->fs_info;
        const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);

        /* There are no space reservations during log replay, bail out. */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return;

        trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
                                      bytes, 0);
        btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_metadata_size(fs_info, 1);
        /*
         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
         * which, for speed, doesn't reserve space.  This is a problem because
         * we still need to reserve space for this update, so try to reserve
         * the space here.
         *
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal, since
         * we always reserve enough to update the inode item.
         */
        if (!src_rsv || (!trans->bytes_reserved &&
                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
                ret = btrfs_qgroup_reserve_meta(root, num_bytes,
                                          BTRFS_QGROUP_RSV_META_PREALLOC, true);
                if (ret < 0)
                        return ret;
                ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
                                          BTRFS_RESERVE_NO_FLUSH);
                /* NO_FLUSH could only fail with -ENOSPC */
                ASSERT(ret == 0 || ret == -ENOSPC);
                if (ret)
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
        } else {
                ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        }

        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                              node->inode_id, num_bytes, 1);
                node->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
                                                struct btrfs_delayed_node *node,
                                                bool qgroup_free)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                      node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
        if (qgroup_free)
                btrfs_qgroup_free_meta_prealloc(node->root,
                                node->bytes_reserved);
        else
                btrfs_qgroup_convert_reserved_meta(node->root,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}
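
/*
 * On the qgroup side, the prealloc reservation taken in
 * btrfs_delayed_inode_reserve_metadata() has two possible fates here: if
 * the delayed inode update is being thrown away (qgroup_free == true) the
 * reservation is returned, otherwise it is converted to a per-transaction
 * reservation because the metadata is actually going to be written.
 */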

/*
 * Insert a single delayed item or a batch of delayed items, as many as fit in
 * a single leaf. The delayed items (dir index keys) are sorted by their key in
 * the rbtree, and if there's a gap between two consecutive dir index items,
 * then it means that at some point we had delayed dir indexes to add but they
 * got removed (by btrfs_delete_delayed_dir_index()) before we attempted to
 * flush them into the subvolume tree. Dir index keys also have their offsets
 * coming from a monotonically increasing counter, so we can't get new keys
 * with an offset that fits within a gap between delayed dir index items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *first_item)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_node *node = first_item->delayed_node;
        LIST_HEAD(item_list);
        struct btrfs_delayed_item *curr;
        struct btrfs_delayed_item *next;
        const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
        struct btrfs_item_batch batch;
        int total_size;
        char *ins_data = NULL;
        int ret;
        bool continuous_keys_only = false;

        lockdep_assert_held(&node->mutex);

        /*
         * During normal operation the delayed index offset is continuously
         * increasing, so we can batch insert all items as there will not be any
         * overlapping keys in the tree.
         *
         * The exception to this is log replay, where we may have interleaved
         * offsets in the tree, so our batch needs to be continuous keys only in
         * order to ensure we do not end up with out of order items in our leaf.
         */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                continuous_keys_only = true;

        /*
         * For delayed items to insert, we track reserved metadata bytes based
         * on the number of leaves that we will use.
         * See btrfs_insert_delayed_dir_index() and
         * btrfs_delayed_item_reserve_metadata().
         */
        ASSERT(first_item->bytes_reserved == 0);

        list_add_tail(&first_item->tree_list, &item_list);
        batch.total_data_size = first_item->data_len;
        batch.nr = 1;
        total_size = first_item->data_len + sizeof(struct btrfs_item);
        curr = first_item;

        while (true) {
                int next_size;

                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                /*
                 * We cannot allow gaps in the key space if we're doing log
                 * replay.
                 */
                if (continuous_keys_only &&
                    (next->key.offset != curr->key.offset + 1))
                        break;

                ASSERT(next->bytes_reserved == 0);

                next_size = next->data_len + sizeof(struct btrfs_item);
                if (total_size + next_size > max_size)
                        break;

                list_add_tail(&next->tree_list, &item_list);
                batch.nr++;
                total_size += next_size;
                batch.total_data_size += next->data_len;
                curr = next;
        }

        if (batch.nr == 1) {
                batch.keys = &first_item->key;
                batch.data_sizes = &first_item->data_len;
        } else {
                struct btrfs_key *ins_keys;
                u32 *ins_sizes;
                int i = 0;

                ins_data = kmalloc(batch.nr * sizeof(u32) +
                                   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
                if (!ins_data) {
                        ret = -ENOMEM;
                        goto out;
                }
                ins_sizes = (u32 *)ins_data;
                ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
                batch.keys = ins_keys;
                batch.data_sizes = ins_sizes;
                list_for_each_entry(curr, &item_list, tree_list) {
                        ins_keys[i] = curr->key;
                        ins_sizes[i] = curr->data_len;
                        i++;
                }
        }

        ret = btrfs_insert_empty_items(trans, root, path, &batch);
        if (ret)
                goto out;

        list_for_each_entry(curr, &item_list, tree_list) {
                char *data_ptr;

                data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
                write_extent_buffer(path->nodes[0], &curr->data,
                                    (unsigned long)data_ptr, curr->data_len);
                path->slots[0]++;
        }

        /*
         * Now release our path before releasing the delayed items and their
         * metadata reservations, so that we don't block other tasks for more
         * time than needed.
         */
        btrfs_release_path(path);

        ASSERT(node->index_item_leaves > 0);

        /*
         * For normal operations we will batch an entire leaf's worth of delayed
         * items, so if there are more items to process we can decrement
         * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
         *
         * However for log replay we may not have inserted an entire leaf's
         * worth of items, we may have not had continuous items, so decrementing
         * here would mess up the index_item_leaves accounting.  For this case
         * only clean up the accounting when there are no items left.
         */
        if (next && !continuous_keys_only) {
                /*
                 * We inserted one batch of items into a leaf and there are
                 * more items to flush in a future batch, so now release one
                 * unit of metadata space from the delayed block reserve,
                 * corresponding to the leaf we just flushed to.
                 */
                btrfs_delayed_item_release_leaves(node, 1);
                node->index_item_leaves--;
        } else if (!next) {
                /*
                 * There are no more items to insert. We can have a number of
                 * reserved leaves > 1 here - this happens when many dir index
                 * items are added and then removed before they are flushed
                 * (file names with a very short life that never span a
                 * transaction). So release all remaining leaves.
                 */
                btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
                node->index_item_leaves = 0;
        }

        list_for_each_entry_safe(curr, next, &item_list, tree_list) {
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }
out:
        kfree(ins_data);
        return ret;
}
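
/*
 * A rough sizing example for the batching above, assuming a 16K nodesize:
 * each dir index item consumes its data_len (a struct btrfs_dir_item plus
 * the name length) plus sizeof(struct btrfs_item) for the item header in
 * the leaf. With short file names that is on the order of 50-70 bytes per
 * item, so a single call here can flush a couple hundred delayed items
 * with one btree insertion.
 */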

static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        int ret = 0;

        while (ret == 0) {
                struct btrfs_delayed_item *curr;

                mutex_lock(&node->mutex);
                curr = __btrfs_first_delayed_insertion_item(node);
                if (!curr) {
                        mutex_unlock(&node->mutex);
                        break;
                }
                ret = btrfs_insert_delayed_item(trans, root, path, curr);
                mutex_unlock(&node->mutex);
        }

        return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf = path->nodes[0];
        LIST_HEAD(batch_list);
        int nitems, slot, last_slot;
        int ret;
        u64 total_reserved_size = item->bytes_reserved;

        ASSERT(leaf != NULL);

        slot = path->slots[0];
        last_slot = btrfs_header_nritems(leaf) - 1;
        /*
         * Our caller always gives us a path pointing to an existing item, so
         * this can not happen.
         */
        ASSERT(slot <= last_slot);
        if (WARN_ON(slot > last_slot))
                return -ENOENT;

        nitems = 1;
        curr = item;
        list_add_tail(&curr->tree_list, &batch_list);

        /*
         * Keep checking if the next delayed item matches the next item in the
         * leaf - if so, we can add it to the batch of items to delete from the
         * leaf.
         */
        while (slot < last_slot) {
                struct btrfs_key key;

                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                slot++;
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (btrfs_comp_cpu_keys(&next->key, &key) != 0)
                        break;
                nitems++;
                curr = next;
                list_add_tail(&curr->tree_list, &batch_list);
                total_reserved_size += curr->bytes_reserved;
        }

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                return ret;

        /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
        if (total_reserved_size > 0) {
                /*
                 * Check btrfs_delayed_item_reserve_metadata() to see why we
                 * don't need to release/reserve qgroup space.
                 */
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->key.objectid, total_reserved_size,
                                              0);
                btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
                                        total_reserved_size, NULL);
        }

        list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

        return 0;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        int ret = 0;

        while (ret == 0) {
                struct btrfs_delayed_item *item;

                mutex_lock(&node->mutex);
                item = __btrfs_first_delayed_deletion_item(node);
                if (!item) {
                        mutex_unlock(&node->mutex);
                        break;
                }

                ret = btrfs_search_slot(trans, root, &item->key, path, -1, 1);
                if (ret > 0) {
                        /*
                         * There's no matching item in the leaf. This means we
                         * have already deleted this item in a past run of the
                         * delayed items. We ignore errors when running delayed
                         * items from an async context, through a work queue job
                         * running btrfs_async_run_delayed_root(), and don't
                         * release delayed items that failed to complete. This
                         * is because we will retry later, and at transaction
                         * commit time we always run delayed items and will
                         * then deal with errors if they fail to run again.
                         *
                         * So just release delayed items for which we can't find
                         * an item in the tree, and move to the next item.
                         */
                        btrfs_release_path(path);
                        btrfs_release_delayed_item(item);
                        ret = 0;
                } else if (ret == 0) {
                        ret = btrfs_batch_delete_items(trans, root, path, item);
                        btrfs_release_path(path);
                }

                /*
                 * We unlock and relock on each iteration, this is to prevent
                 * blocking other tasks for too long while we are being run from
                 * the async context (work queue job). Those tasks are typically
                 * running system calls like creat/mkdir/rename/unlink/etc which
                 * need to add delayed items to this delayed node.
                 */
                mutex_unlock(&node->mutex);
        }

        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node &&
            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                BUG_ON(!delayed_node->root);
                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
        if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
                struct btrfs_delayed_root *delayed_root;

                ASSERT(delayed_node->root);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int mod;
        int ret;

        key.objectid = node->inode_id;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                mod = -1;
        else
                mod = 1;

        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
        if (ret > 0)
                ret = -ENOENT;
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);

        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                goto out;

        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(leaf))
                goto search;
again:
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != node->inode_id)
                goto out;

        if (key.type != BTRFS_INODE_REF_KEY &&
            key.type != BTRFS_INODE_EXTREF_KEY)
                goto out;

        /*
         * Delayed iref deletion is only used for an inode that has a single
         * link, so there is only one iref. The case where several irefs are
         * stored in the same item does not occur.
         */
        btrfs_del_item(trans, root, path);
out:
        btrfs_release_delayed_iref(node);
        btrfs_release_path(path);
err_out:
        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
        btrfs_release_delayed_inode(node);

        /*
         * If we fail to update the delayed inode we need to abort the
         * transaction, because we could leave the inode with the improper
         * counts behind.
         */
        if (ret && ret != -ENOENT)
                btrfs_abort_transaction(trans, ret);

        return ret;

search:
        btrfs_release_path(path);

        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);

        ret = 0;
        leaf = path->nodes[0];
        path->slots[0]--;
        goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             struct btrfs_path *path,
                                             struct btrfs_delayed_node *node)
{
        int ret;

        mutex_lock(&node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        ret = __btrfs_update_delayed_inode(trans, root, path, node);
        mutex_unlock(&node->mutex);
        return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_delayed_node *node)
{
        int ret;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;
        bool count = (nr > 0);

        if (TRANS_ABORTED(trans))
                return -EIO;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        delayed_root = fs_info->delayed_root;

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || nr--)) {
                ret = __btrfs_commit_inode_delayed_items(trans, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        curr_node = NULL;
                        btrfs_abort_transaction(trans, ret);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        if (curr_node)
                btrfs_release_delayed_node(curr_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
        return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
        return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_release_delayed_node(delayed_node);
                return -ENOMEM;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

        btrfs_release_delayed_node(delayed_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        trans = btrfs_join_transaction(delayed_node->root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto trans_out;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
                                                   path, delayed_node);
        else
                ret = 0;
        mutex_unlock(&delayed_node->mutex);

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
trans_out:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
out:
        btrfs_release_delayed_node(delayed_node);

        return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = READ_ONCE(inode->delayed_node);
        if (!delayed_node)
                return;

        inode->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
        struct btrfs_delayed_root *delayed_root;
        int nr;
        struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
        struct btrfs_async_delayed_work *async_work;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int total_done = 0;

        async_work = container_of(work, struct btrfs_async_delayed_work, work);
        delayed_root = async_work->delayed_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        do {
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND / 2)
                        break;

                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                if (!delayed_node)
                        break;

                root = delayed_node->root;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_release_path(path);
                        btrfs_release_prepared_delayed_node(delayed_node);
                        total_done++;
                        continue;
                }

                block_rsv = trans->block_rsv;
                trans->block_rsv = &root->fs_info->delayed_block_rsv;

                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

                trans->block_rsv = block_rsv;
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty_nodelay(root->fs_info);

                btrfs_release_path(path);
                btrfs_release_prepared_delayed_node(delayed_node);
                total_done++;

        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                 || total_done < async_work->nr);

        btrfs_free_path(path);
out:
        wake_up(&delayed_root->wait);
        kfree(async_work);
}
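
/*
 * Note that the return value of __btrfs_commit_inode_delayed_items() is
 * deliberately ignored above: as the comment in btrfs_delete_delayed_items()
 * explains, items that fail from this async context are kept around and
 * retried at transaction commit time, where errors are actually handled.
 */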

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_async_delayed_work *async_work;

        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
        if (!async_work)
                return -ENOMEM;

        async_work->delayed_root = delayed_root;
        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
                        NULL);
        async_work->nr = nr;

        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
        int val = atomic_read(&delayed_root->items_seq);

        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return 1;

        return 0;
}
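
/*
 * A waiter stops sleeping either because enough progress was made since it
 * sampled items_seq (at least BTRFS_DELAYED_BATCH items completed, with a
 * counter wraparound also treated as progress), or because the backlog
 * dropped back below BTRFS_DELAYED_BACKGROUND.
 */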
1413
1414 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1415 {
1416         struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1417
1418         if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1419                 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1420                 return;
1421
1422         if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1423                 int seq;
1424                 int ret;
1425
1426                 seq = atomic_read(&delayed_root->items_seq);
1427
1428                 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1429                 if (ret)
1430                         return;
1431
1432                 wait_event_interruptible(delayed_root->wait,
1433                                          could_end_wait(delayed_root, seq));
1434                 return;
1435         }
1436
1437         btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1438 }
1439
1440 /* Will return 0 or -ENOMEM */
1441 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1442                                    const char *name, int name_len,
1443                                    struct btrfs_inode *dir,
1444                                    struct btrfs_disk_key *disk_key, u8 type,
1445                                    u64 index)
1446 {
1447         struct btrfs_fs_info *fs_info = trans->fs_info;
1448         const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1449         struct btrfs_delayed_node *delayed_node;
1450         struct btrfs_delayed_item *delayed_item;
1451         struct btrfs_dir_item *dir_item;
1452         bool reserve_leaf_space;
1453         u32 data_len;
1454         int ret;
1455
1456         delayed_node = btrfs_get_or_create_delayed_node(dir);
1457         if (IS_ERR(delayed_node))
1458                 return PTR_ERR(delayed_node);
1459
1460         delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1461         if (!delayed_item) {
1462                 ret = -ENOMEM;
1463                 goto release_node;
1464         }
1465
1466         delayed_item->key.objectid = btrfs_ino(dir);
1467         delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1468         delayed_item->key.offset = index;
1469         delayed_item->ins_or_del = BTRFS_DELAYED_INSERTION_ITEM;
1470
1471         dir_item = (struct btrfs_dir_item *)delayed_item->data;
1472         dir_item->location = *disk_key;
1473         btrfs_set_stack_dir_transid(dir_item, trans->transid);
1474         btrfs_set_stack_dir_data_len(dir_item, 0);
1475         btrfs_set_stack_dir_name_len(dir_item, name_len);
1476         btrfs_set_stack_dir_type(dir_item, type);
1477         memcpy((char *)(dir_item + 1), name, name_len);
1478
1479         data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1480
1481         mutex_lock(&delayed_node->mutex);
1482
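             /*
              * Dir index items are batched per leaf: reserve space for a new
              * leaf only when there is no leaf yet, or when this item (data
              * plus item header) no longer fits into the current batch of
              * one leaf's worth of data.
              */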
1483         if (delayed_node->index_item_leaves == 0 ||
1484             delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1485                 delayed_node->curr_index_batch_size = data_len;
1486                 reserve_leaf_space = true;
1487         } else {
1488                 delayed_node->curr_index_batch_size += data_len;
1489                 reserve_leaf_space = false;
1490         }
1491
1492         if (reserve_leaf_space) {
1493                 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root,
1494                                                           delayed_item);
1495                 /*
1496                  * Space was reserved for a dir index item insertion when we
1497                  * started the transaction, so getting a failure here should be
1498                  * impossible.
1499                  */
1500                 if (WARN_ON(ret)) {
1501                         mutex_unlock(&delayed_node->mutex);
1502                         btrfs_release_delayed_item(delayed_item);
1503                         goto release_node;
1504                 }
1505
1506                 delayed_node->index_item_leaves++;
1507         } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
1508                 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1509
1510                 /*
1511                  * Adding the new dir index item does not require touching another
1512                  * leaf, so we can release 1 unit of metadata that was previously
1513                  * reserved when starting the transaction. This applies only to
1514                  * the case where we had a transaction start and excludes the
1515                  * transaction join case (when replaying log trees).
1516                  */
1517                 trace_btrfs_space_reservation(fs_info, "transaction",
1518                                               trans->transid, bytes, 0);
1519                 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1520                 ASSERT(trans->bytes_reserved >= bytes);
1521                 trans->bytes_reserved -= bytes;
1522         }
1523
1524         ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1525         if (unlikely(ret)) {
1526                 btrfs_err(trans->fs_info,
1527                           "error adding delayed dir index item (name: %.*s) to the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1528                           name_len, name, delayed_node->root->root_key.objectid,
1529                           delayed_node->inode_id, ret);
1530                 BUG();
1531         }
1532         mutex_unlock(&delayed_node->mutex);
1533
1534 release_node:
1535         btrfs_release_delayed_node(delayed_node);
1536         return ret;
1537 }
1538
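     /*
      * If the dir index item to delete is still an unflushed delayed
      * insertion item, drop that item directly so the two operations cancel
      * out. Returns 0 if a matching insertion item was found and dropped,
      * 1 otherwise.
      */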
1539 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1540                                                struct btrfs_delayed_node *node,
1541                                                struct btrfs_key *key)
1542 {
1543         struct btrfs_delayed_item *item;
1544
1545         mutex_lock(&node->mutex);
1546         item = __btrfs_lookup_delayed_insertion_item(node, key);
1547         if (!item) {
1548                 mutex_unlock(&node->mutex);
1549                 return 1;
1550         }
1551
1552         /*
1553          * For delayed insertion items, we track the reserved metadata bytes
1554          * based on the number of leaves that we will use.
1555          * See btrfs_insert_delayed_dir_index() and
1556          * btrfs_delayed_item_reserve_metadata().
1557          */
1558         ASSERT(item->bytes_reserved == 0);
1559         ASSERT(node->index_item_leaves > 0);
1560
1561         /*
1562          * If there's only one leaf reserved, we can subtract this item's
1563          * size from the current batch; otherwise we cannot, because we
1564          * don't know which leaf it belongs to. With the current limit on
1565          * delayed items, we rarely accumulate enough dir index items to
1566          * fill more than one leaf (even when using a leaf size of 4K).
1567          */
1568         if (node->index_item_leaves == 1) {
1569                 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1570
1571                 ASSERT(node->curr_index_batch_size >= data_len);
1572                 node->curr_index_batch_size -= data_len;
1573         }
1574
1575         btrfs_release_delayed_item(item);
1576
1577         /* If we now have no more dir index items, we can release all leaves. */
1578         if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1579                 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1580                 node->index_item_leaves = 0;
1581         }
1582
1583         mutex_unlock(&node->mutex);
1584         return 0;
1585 }
1586
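     /*
      * Queue the deletion of a dir index item. First try to cancel a pending
      * delayed insertion item with the same key; only if none exists do we
      * add a deletion item to the delayed node's deletion tree.
      */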
1587 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1588                                    struct btrfs_inode *dir, u64 index)
1589 {
1590         struct btrfs_delayed_node *node;
1591         struct btrfs_delayed_item *item;
1592         struct btrfs_key item_key;
1593         int ret;
1594
1595         node = btrfs_get_or_create_delayed_node(dir);
1596         if (IS_ERR(node))
1597                 return PTR_ERR(node);
1598
1599         item_key.objectid = btrfs_ino(dir);
1600         item_key.type = BTRFS_DIR_INDEX_KEY;
1601         item_key.offset = index;
1602
1603         ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1604                                                   &item_key);
1605         if (!ret)
1606                 goto end;
1607
1608         item = btrfs_alloc_delayed_item(0);
1609         if (!item) {
1610                 ret = -ENOMEM;
1611                 goto end;
1612         }
1613
1614         item->key = item_key;
1615         item->ins_or_del = BTRFS_DELAYED_DELETION_ITEM;
1616
1617         ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1618         /*
1619          * We reserved enough space when we started a new transaction, so a
1620          * metadata reservation failure here should be impossible.
1621          */
1622         if (ret < 0) {
1623                 btrfs_err(trans->fs_info,
1624 "metadata reservation failed for delayed dir item deletion, should have been reserved");
1625                 btrfs_release_delayed_item(item);
1626                 goto end;
1627         }
1628
1629         mutex_lock(&node->mutex);
1630         ret = __btrfs_add_delayed_item(node, item);
1631         if (unlikely(ret)) {
1632                 btrfs_err(trans->fs_info,
1633                           "error adding delayed dir index item (index: %llu) to the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1634                           index, node->root->root_key.objectid,
1635                           node->inode_id, ret);
1636                 btrfs_delayed_item_release_metadata(dir->root, item);
1637                 btrfs_release_delayed_item(item);
1638         }
1639         mutex_unlock(&node->mutex);
1640 end:
1641         btrfs_release_delayed_node(node);
1642         return ret;
1643 }
1644
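     /*
      * Copy the directory index counter cached in the delayed node into the
      * in-memory inode, if the delayed node has one.
      */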
1645 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1646 {
1647         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1648
1649         if (!delayed_node)
1650                 return -ENOENT;
1651
1652         /*
1653          * Since we hold the i_mutex of this directory, no new directory
1654          * index can be added to the delayed node and index_cnt cannot be
1655          * updated while we are here, so we need not lock the delayed node.
1656          */
1657         if (!delayed_node->index_cnt) {
1658                 btrfs_release_delayed_node(delayed_node);
1659                 return -EINVAL;
1660         }
1661
1662         inode->index_cnt = delayed_node->index_cnt;
1663         btrfs_release_delayed_node(delayed_node);
1664         return 0;
1665 }
1666
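     /*
      * Grab references to all pending delayed insertion and deletion items
      * so readdir can merge them with the on-disk dir items. Upgrades to the
      * exclusive inode lock, since item->readdir_list allows only one
      * readdir with delayed items at a time.
      */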
1667 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1668                                      struct list_head *ins_list,
1669                                      struct list_head *del_list)
1670 {
1671         struct btrfs_delayed_node *delayed_node;
1672         struct btrfs_delayed_item *item;
1673
1674         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1675         if (!delayed_node)
1676                 return false;
1677
1678         /*
1679          * We can only do one readdir with delayed items at a time because of
1680          * item->readdir_list.
1681          */
1682         btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1683         btrfs_inode_lock(inode, 0);
1684
1685         mutex_lock(&delayed_node->mutex);
1686         item = __btrfs_first_delayed_insertion_item(delayed_node);
1687         while (item) {
1688                 refcount_inc(&item->refs);
1689                 list_add_tail(&item->readdir_list, ins_list);
1690                 item = __btrfs_next_delayed_item(item);
1691         }
1692
1693         item = __btrfs_first_delayed_deletion_item(delayed_node);
1694         while (item) {
1695                 refcount_inc(&item->refs);
1696                 list_add_tail(&item->readdir_list, del_list);
1697                 item = __btrfs_next_delayed_item(item);
1698         }
1699         mutex_unlock(&delayed_node->mutex);
1700         /*
1701          * This delayed node is still cached in the btrfs inode, so refs
1702          * must be > 1 here, and we need not check whether it is about to
1703          * be freed.
1704          *
1705          * Besides that, this function is only used for readdir, and no
1706          * delayed items are inserted or deleted in this period, so we need
1707          * not requeue or dequeue this delayed node either.
1708          */
1709         refcount_dec(&delayed_node->refs);
1710
1711         return true;
1712 }
1713
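     /*
      * Drop the item references taken by btrfs_readdir_get_delayed_items()
      * and downgrade back to the shared inode lock the VFS expects.
      */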
1714 void btrfs_readdir_put_delayed_items(struct inode *inode,
1715                                      struct list_head *ins_list,
1716                                      struct list_head *del_list)
1717 {
1718         struct btrfs_delayed_item *curr, *next;
1719
1720         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1721                 list_del(&curr->readdir_list);
1722                 if (refcount_dec_and_test(&curr->refs))
1723                         kfree(curr);
1724         }
1725
1726         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1727                 list_del(&curr->readdir_list);
1728                 if (refcount_dec_and_test(&curr->refs))
1729                         kfree(curr);
1730         }
1731
1732         /*
1733          * The VFS is going to do up_read(), so we need to downgrade back to a
1734          * read lock.
1735          */
1736         downgrade_write(&inode->i_rwsem);
1737 }
1738
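     /*
      * Return 1 if a delayed deletion item exists for @index, 0 otherwise.
      * The deletion list is sorted by index, so we can stop at the first
      * entry beyond @index.
      */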
1739 int btrfs_should_delete_dir_index(struct list_head *del_list,
1740                                   u64 index)
1741 {
1742         struct btrfs_delayed_item *curr;
1743         int ret = 0;
1744
1745         list_for_each_entry(curr, del_list, readdir_list) {
1746                 if (curr->key.offset > index)
1747                         break;
1748                 if (curr->key.offset == index) {
1749                         ret = 1;
1750                         break;
1751                 }
1752         }
1753         return ret;
1754 }
1755
1756 /*
1757  * btrfs_readdir_delayed_dir_index - read the dir index items stored in the
1758  * delayed tree and emit them to the readdir context.
1759  */
1760 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1761                                     struct list_head *ins_list)
1762 {
1763         struct btrfs_dir_item *di;
1764         struct btrfs_delayed_item *curr, *next;
1765         struct btrfs_key location;
1766         char *name;
1767         int name_len;
1768         int over = 0;
1769         unsigned char d_type;
1770
1771         if (list_empty(ins_list))
1772                 return 0;
1773
1774         /*
1775          * The data of a delayed item never changes once it is created, so
1776          * we need not lock the items. And since we hold the i_mutex of the
1777          * directory, nobody can delete any directory index now.
1778          */
1779         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1780                 list_del(&curr->readdir_list);
1781
1782                 if (curr->key.offset < ctx->pos) {
1783                         if (refcount_dec_and_test(&curr->refs))
1784                                 kfree(curr);
1785                         continue;
1786                 }
1787
1788                 ctx->pos = curr->key.offset;
1789
1790                 di = (struct btrfs_dir_item *)curr->data;
1791                 name = (char *)(di + 1);
1792                 name_len = btrfs_stack_dir_name_len(di);
1793
1794                 d_type = fs_ftype_to_dtype(di->type);
1795                 btrfs_disk_key_to_cpu(&location, &di->location);
1796
1797                 over = !dir_emit(ctx, name, name_len,
1798                                location.objectid, d_type);
1799
1800                 if (refcount_dec_and_test(&curr->refs))
1801                         kfree(curr);
1802
1803                 if (over)
1804                         return 1;
1805                 ctx->pos++;
1806         }
1807         return 0;
1808 }
1809
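     /*
      * Snapshot the in-memory inode state into a stack inode item; the
      * callers below use this to fill the copy kept in the delayed node.
      */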
1810 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1811                                   struct btrfs_inode_item *inode_item,
1812                                   struct inode *inode)
1813 {
1814         u64 flags;
1815
1816         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1817         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1818         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1819         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1820         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1821         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1822         btrfs_set_stack_inode_generation(inode_item,
1823                                          BTRFS_I(inode)->generation);
1824         btrfs_set_stack_inode_sequence(inode_item,
1825                                        inode_peek_iversion(inode));
1826         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1827         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1828         flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1829                                           BTRFS_I(inode)->ro_flags);
1830         btrfs_set_stack_inode_flags(inode_item, flags);
1831         btrfs_set_stack_inode_block_group(inode_item, 0);
1832
1833         btrfs_set_stack_timespec_sec(&inode_item->atime,
1834                                      inode->i_atime.tv_sec);
1835         btrfs_set_stack_timespec_nsec(&inode_item->atime,
1836                                       inode->i_atime.tv_nsec);
1837
1838         btrfs_set_stack_timespec_sec(&inode_item->mtime,
1839                                      inode->i_mtime.tv_sec);
1840         btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1841                                       inode->i_mtime.tv_nsec);
1842
1843         btrfs_set_stack_timespec_sec(&inode_item->ctime,
1844                                      inode->i_ctime.tv_sec);
1845         btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1846                                       inode->i_ctime.tv_nsec);
1847
1848         btrfs_set_stack_timespec_sec(&inode_item->otime,
1849                                      BTRFS_I(inode)->i_otime.tv_sec);
1850         btrfs_set_stack_timespec_nsec(&inode_item->otime,
1851                                      BTRFS_I(inode)->i_otime.tv_nsec);
1852 }
1853
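     /*
      * The counterpart of fill_stack_inode_item(): initialize an in-memory
      * inode from the pending inode item of its delayed node. Returns
      * -ENOENT if there is no delayed node or its inode item is not dirty.
      */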
1854 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1855 {
1856         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1857         struct btrfs_delayed_node *delayed_node;
1858         struct btrfs_inode_item *inode_item;
1859
1860         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1861         if (!delayed_node)
1862                 return -ENOENT;
1863
1864         mutex_lock(&delayed_node->mutex);
1865         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1866                 mutex_unlock(&delayed_node->mutex);
1867                 btrfs_release_delayed_node(delayed_node);
1868                 return -ENOENT;
1869         }
1870
1871         inode_item = &delayed_node->inode_item;
1872
1873         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1874         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1875         btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1876         btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1877                         round_up(i_size_read(inode), fs_info->sectorsize));
1878         inode->i_mode = btrfs_stack_inode_mode(inode_item);
1879         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1880         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1881         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1882         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1883
1884         inode_set_iversion_queried(inode,
1885                                    btrfs_stack_inode_sequence(inode_item));
1886         inode->i_rdev = 0;
1887         *rdev = btrfs_stack_inode_rdev(inode_item);
1888         btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1889                                 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1890
1891         inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1892         inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1893
1894         inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1895         inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1896
1897         inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1898         inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1899
1900         BTRFS_I(inode)->i_otime.tv_sec =
1901                 btrfs_stack_timespec_sec(&inode_item->otime);
1902         BTRFS_I(inode)->i_otime.tv_nsec =
1903                 btrfs_stack_timespec_nsec(&inode_item->otime);
1904
1905         inode->i_generation = BTRFS_I(inode)->generation;
1906         BTRFS_I(inode)->index_cnt = (u64)-1;
1907
1908         mutex_unlock(&delayed_node->mutex);
1909         btrfs_release_delayed_node(delayed_node);
1910         return 0;
1911 }
1912
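     /*
      * Record an inode update in the delayed node instead of updating the
      * inode item in the subvolume tree right away. Metadata is reserved
      * only the first time the node's inode item is dirtied.
      */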
1913 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1914                                struct btrfs_root *root,
1915                                struct btrfs_inode *inode)
1916 {
1917         struct btrfs_delayed_node *delayed_node;
1918         int ret = 0;
1919
1920         delayed_node = btrfs_get_or_create_delayed_node(inode);
1921         if (IS_ERR(delayed_node))
1922                 return PTR_ERR(delayed_node);
1923
1924         mutex_lock(&delayed_node->mutex);
1925         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1926                 fill_stack_inode_item(trans, &delayed_node->inode_item,
1927                                       &inode->vfs_inode);
1928                 goto release_node;
1929         }
1930
1931         ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1932         if (ret)
1933                 goto release_node;
1934
1935         fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1936         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1937         delayed_node->count++;
1938         atomic_inc(&root->fs_info->delayed_root->items);
1939 release_node:
1940         mutex_unlock(&delayed_node->mutex);
1941         btrfs_release_delayed_node(delayed_node);
1942         return ret;
1943 }
1944
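     /*
      * Queue the deletion of the inode's (single) INODE_REF item so it is
      * done when the delayed node is committed, instead of immediately.
      */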
1945 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1946 {
1947         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1948         struct btrfs_delayed_node *delayed_node;
1949
1950         /*
1951          * We don't do delayed inode updates during log recovery because it
1952          * leads to enospc problems. This means we also can't do delayed
1953          * inode ref deletions.
1954          */
1955         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1956                 return -EAGAIN;
1957
1958         delayed_node = btrfs_get_or_create_delayed_node(inode);
1959         if (IS_ERR(delayed_node))
1960                 return PTR_ERR(delayed_node);
1961
1962         /*
1963          * We don't reserve space for inode ref deletion because:
1964          * - We ONLY do async inode ref deletion for inodes that have only
1965          *   one link (i_nlink == 1), which means there is only one inode
1966          *   ref. In most cases the inode ref and the inode item are in
1967          *   the same leaf, and we deal with them at the same time. Since
1968          *   we are sure we reserve space for the inode item, it is
1969          *   unnecessary to also reserve space for the inode ref deletion.
1970          * - If the inode ref and the inode item are not in the same leaf,
1971          *   we also needn't worry about enospc, because we reserve much
1972          *   more space for the inode update than it needs.
1973          * - At the very worst, we can steal some space from the global
1974          *   reservation, but that is very rare.
1975          */
1976         mutex_lock(&delayed_node->mutex);
1977         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1978                 goto release_node;
1979
1980         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1981         delayed_node->count++;
1982         atomic_inc(&fs_info->delayed_root->items);
1983 release_node:
1984         mutex_unlock(&delayed_node->mutex);
1985         btrfs_release_delayed_node(delayed_node);
1986         return 0;
1987 }
1988
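     /*
      * Throw away all pending items of a delayed node and release the
      * metadata reservations attached to them, for the paths that discard
      * delayed items instead of flushing them.
      */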
1989 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1990 {
1991         struct btrfs_root *root = delayed_node->root;
1992         struct btrfs_fs_info *fs_info = root->fs_info;
1993         struct btrfs_delayed_item *curr_item, *prev_item;
1994
1995         mutex_lock(&delayed_node->mutex);
1996         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1997         while (curr_item) {
1998                 prev_item = curr_item;
1999                 curr_item = __btrfs_next_delayed_item(prev_item);
2000                 btrfs_release_delayed_item(prev_item);
2001         }
2002
2003         if (delayed_node->index_item_leaves > 0) {
2004                 btrfs_delayed_item_release_leaves(delayed_node,
2005                                           delayed_node->index_item_leaves);
2006                 delayed_node->index_item_leaves = 0;
2007         }
2008
2009         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2010         while (curr_item) {
2011                 btrfs_delayed_item_release_metadata(root, curr_item);
2012                 prev_item = curr_item;
2013                 curr_item = __btrfs_next_delayed_item(prev_item);
2014                 btrfs_release_delayed_item(prev_item);
2015         }
2016
2017         btrfs_release_delayed_iref(delayed_node);
2018
2019         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2020                 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2021                 btrfs_release_delayed_inode(delayed_node);
2022         }
2023         mutex_unlock(&delayed_node->mutex);
2024 }
2025
2026 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2027 {
2028         struct btrfs_delayed_node *delayed_node;
2029
2030         delayed_node = btrfs_get_delayed_node(inode);
2031         if (!delayed_node)
2032                 return;
2033
2034         __btrfs_kill_delayed_node(delayed_node);
2035         btrfs_release_delayed_node(delayed_node);
2036 }
2037
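     /*
      * Kill every delayed node of the given root, walking the radix tree in
      * gang lookups of up to 8 nodes; typically used when the root itself
      * is being deleted.
      */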
2038 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2039 {
2040         u64 inode_id = 0;
2041         struct btrfs_delayed_node *delayed_nodes[8];
2042         int i, n;
2043
2044         while (1) {
2045                 spin_lock(&root->inode_lock);
2046                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
2047                                            (void **)delayed_nodes, inode_id,
2048                                            ARRAY_SIZE(delayed_nodes));
2049                 if (!n) {
2050                         spin_unlock(&root->inode_lock);
2051                         break;
2052                 }
2053
2054                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
2055                 for (i = 0; i < n; i++) {
2056                         /*
2057                          * Don't increase refs in case the node is dead and
2058                          * about to be removed from the tree in the loop below.
2059                          */
2060                         if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
2061                                 delayed_nodes[i] = NULL;
2062                 }
2063                 spin_unlock(&root->inode_lock);
2064
2065                 for (i = 0; i < n; i++) {
2066                         if (!delayed_nodes[i])
2067                                 continue;
2068                         __btrfs_kill_delayed_node(delayed_nodes[i]);
2069                         btrfs_release_delayed_node(delayed_nodes[i]);
2070                 }
2071         }
2072 }
2073
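     /*
      * Kill every delayed node on the fs-wide list; meant for cleanup paths
      * where the delayed items can no longer be committed.
      */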
2074 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2075 {
2076         struct btrfs_delayed_node *curr_node, *prev_node;
2077
2078         curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2079         while (curr_node) {
2080                 __btrfs_kill_delayed_node(curr_node);
2081
2082                 prev_node = curr_node;
2083                 curr_node = btrfs_next_delayed_node(curr_node);
2084                 btrfs_release_delayed_node(prev_node);
2085         }
2086 }
2087