GNU Linux-libre 4.9.308-gnu1
fs/btrfs/transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33 #include "dev-replace.h"
34 #include "qgroup.h"
35
36 #define BTRFS_ROOT_TRANS_TAG 0
37
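/*
 * For each transaction state, the mask of handle types that may no longer
 * join the running transaction once it has reached that state;
 * join_transaction() returns -EBUSY for those types.
 */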
38 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
39         [TRANS_STATE_RUNNING]           = 0U,
40         [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
41                                            __TRANS_START),
42         [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
43                                            __TRANS_START |
44                                            __TRANS_ATTACH),
45         [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
46                                            __TRANS_START |
47                                            __TRANS_ATTACH |
48                                            __TRANS_JOIN),
49         [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
50                                            __TRANS_START |
51                                            __TRANS_ATTACH |
52                                            __TRANS_JOIN |
53                                            __TRANS_JOIN_NOLOCK),
54         [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
55                                            __TRANS_START |
56                                            __TRANS_ATTACH |
57                                            __TRANS_JOIN |
58                                            __TRANS_JOIN_NOLOCK),
59 };
60
61 void btrfs_put_transaction(struct btrfs_transaction *transaction)
62 {
63         WARN_ON(atomic_read(&transaction->use_count) == 0);
64         if (atomic_dec_and_test(&transaction->use_count)) {
65                 BUG_ON(!list_empty(&transaction->list));
66                 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
67                 if (transaction->delayed_refs.pending_csums)
68                         btrfs_err(transaction->fs_info,
69                                   "pending csums is %llu",
70                                   transaction->delayed_refs.pending_csums);
71                 while (!list_empty(&transaction->pending_chunks)) {
72                         struct extent_map *em;
73
74                         em = list_first_entry(&transaction->pending_chunks,
75                                               struct extent_map, list);
76                         list_del_init(&em->list);
77                         free_extent_map(em);
78                 }
79                 /*
80                  * If any block groups are found in ->deleted_bgs then it's
81                  * because the transaction was aborted and a commit did not
82                  * happen (things failed before writing the new superblock
83                  * and calling btrfs_finish_extent_commit()), so we can not
84                  * discard the physical locations of the block groups.
85                  */
86                 while (!list_empty(&transaction->deleted_bgs)) {
87                         struct btrfs_block_group_cache *cache;
88
89                         cache = list_first_entry(&transaction->deleted_bgs,
90                                                  struct btrfs_block_group_cache,
91                                                  bg_list);
92                         list_del_init(&cache->bg_list);
93                         btrfs_put_block_group_trimming(cache);
94                         btrfs_put_block_group(cache);
95                 }
96                 kmem_cache_free(btrfs_transaction_cachep, transaction);
97         }
98 }
99
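/*
 * Drop every extent_state still attached to a btree io tree.  Used when the
 * tree is torn down at commit time; nobody is supposed to be waiting on these
 * states anymore, so they are simply erased and freed.
 */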
100 static void clear_btree_io_tree(struct extent_io_tree *tree)
101 {
102         spin_lock(&tree->lock);
103         /*
104          * Do a single barrier for the waitqueue_active check here, the state
105          * of the waitqueue should not change once clear_btree_io_tree is
106          * called.
107          */
108         smp_mb();
109         while (!RB_EMPTY_ROOT(&tree->state)) {
110                 struct rb_node *node;
111                 struct extent_state *state;
112
113                 node = rb_first(&tree->state);
114                 state = rb_entry(node, struct extent_state, rb_node);
115                 rb_erase(&state->rb_node, &tree->state);
116                 RB_CLEAR_NODE(&state->rb_node);
117                 /*
118                  * btree io trees aren't supposed to have tasks waiting for
119                  * changes in the flags of extent states ever.
120                  */
121                 ASSERT(!waitqueue_active(&state->wq));
122                 free_extent_state(state);
123
124                 cond_resched_lock(&tree->lock);
125         }
126         spin_unlock(&tree->lock);
127 }
128
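/*
 * Make the current root nodes of all roots queued on ->switch_commits the new
 * commit roots, and free any roots that were dropped during this transaction.
 * Must run while new commit root searches are excluded, hence commit_root_sem
 * is held for writing.
 */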
129 static noinline void switch_commit_roots(struct btrfs_transaction *trans,
130                                          struct btrfs_fs_info *fs_info)
131 {
132         struct btrfs_root *root, *tmp;
133
134         down_write(&fs_info->commit_root_sem);
135         list_for_each_entry_safe(root, tmp, &trans->switch_commits,
136                                  dirty_list) {
137                 list_del_init(&root->dirty_list);
138                 free_extent_buffer(root->commit_root);
139                 root->commit_root = btrfs_root_node(root);
140                 if (is_fstree(root->objectid))
141                         btrfs_unpin_free_ino(root);
142                 clear_btree_io_tree(&root->dirty_log_pages);
143         }
144
145         /* We can free old roots now. */
146         spin_lock(&trans->dropped_roots_lock);
147         while (!list_empty(&trans->dropped_roots)) {
148                 root = list_first_entry(&trans->dropped_roots,
149                                         struct btrfs_root, root_list);
150                 list_del_init(&root->root_list);
151                 spin_unlock(&trans->dropped_roots_lock);
152                 btrfs_drop_and_free_fs_root(fs_info, root);
153                 spin_lock(&trans->dropped_roots_lock);
154         }
155         spin_unlock(&trans->dropped_roots_lock);
156         up_write(&fs_info->commit_root_sem);
157 }
158
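/*
 * Helpers for the external writer counter.  Only handle types included in
 * TRANS_EXTWRITERS are counted; the commit path waits for this count to
 * reach zero before it starts writing out the transaction.
 */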
159 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
160                                          unsigned int type)
161 {
162         if (type & TRANS_EXTWRITERS)
163                 atomic_inc(&trans->num_extwriters);
164 }
165
166 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
167                                          unsigned int type)
168 {
169         if (type & TRANS_EXTWRITERS)
170                 atomic_dec(&trans->num_extwriters);
171 }
172
173 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
174                                           unsigned int type)
175 {
176         atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
177 }
178
179 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
180 {
181         return atomic_read(&trans->num_extwriters);
182 }
183
184 /*
185  * either allocate a new transaction or hop into the existing one
186  */
187 static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
188 {
189         struct btrfs_transaction *cur_trans;
190         struct btrfs_fs_info *fs_info = root->fs_info;
191
192         spin_lock(&fs_info->trans_lock);
193 loop:
194         /* The file system has been taken offline. No new transactions. */
195         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
196                 spin_unlock(&fs_info->trans_lock);
197                 return -EROFS;
198         }
199
200         cur_trans = fs_info->running_transaction;
201         if (cur_trans) {
202                 if (cur_trans->aborted) {
203                         spin_unlock(&fs_info->trans_lock);
204                         return cur_trans->aborted;
205                 }
206                 if (btrfs_blocked_trans_types[cur_trans->state] & type) {
207                         spin_unlock(&fs_info->trans_lock);
208                         return -EBUSY;
209                 }
210                 atomic_inc(&cur_trans->use_count);
211                 atomic_inc(&cur_trans->num_writers);
212                 extwriter_counter_inc(cur_trans, type);
213                 spin_unlock(&fs_info->trans_lock);
214                 return 0;
215         }
216         spin_unlock(&fs_info->trans_lock);
217
218         /*
219          * If we are ATTACH, we just want to catch the current transaction,
220          * and commit it. If there is no transaction, just return ENOENT.
221          */
222         if (type == TRANS_ATTACH)
223                 return -ENOENT;
224
225         /*
226          * JOIN_NOLOCK only happens during the transaction commit, so
227          * it is impossible that ->running_transaction is NULL
228          */
229         BUG_ON(type == TRANS_JOIN_NOLOCK);
230
231         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
232         if (!cur_trans)
233                 return -ENOMEM;
234
235         spin_lock(&fs_info->trans_lock);
236         if (fs_info->running_transaction) {
237                 /*
238                  * someone started a transaction after we unlocked.  Make sure
239                  * to redo the checks above
240                  */
241                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
242                 goto loop;
243         } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
244                 spin_unlock(&fs_info->trans_lock);
245                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
246                 return -EROFS;
247         }
248
249         cur_trans->fs_info = fs_info;
250         atomic_set(&cur_trans->num_writers, 1);
251         extwriter_counter_init(cur_trans, type);
252         init_waitqueue_head(&cur_trans->writer_wait);
253         init_waitqueue_head(&cur_trans->commit_wait);
254         init_waitqueue_head(&cur_trans->pending_wait);
255         cur_trans->state = TRANS_STATE_RUNNING;
256         /*
257          * One for this trans handle, one so it will live on until we
258          * commit the transaction.
259          */
260         atomic_set(&cur_trans->use_count, 2);
261         atomic_set(&cur_trans->pending_ordered, 0);
262         cur_trans->flags = 0;
263         cur_trans->start_time = get_seconds();
264
265         memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
266
267         cur_trans->delayed_refs.href_root = RB_ROOT;
268         cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
269         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
270
271         /*
272          * although the tree mod log is per file system and not per transaction,
273          * the log must never go across transaction boundaries.
274          */
275         smp_mb();
276         if (!list_empty(&fs_info->tree_mod_seq_list))
277                 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
278         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
279                 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
280         atomic64_set(&fs_info->tree_mod_seq, 0);
281
282         spin_lock_init(&cur_trans->delayed_refs.lock);
283
284         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
285         INIT_LIST_HEAD(&cur_trans->pending_chunks);
286         INIT_LIST_HEAD(&cur_trans->switch_commits);
287         INIT_LIST_HEAD(&cur_trans->dirty_bgs);
288         INIT_LIST_HEAD(&cur_trans->io_bgs);
289         INIT_LIST_HEAD(&cur_trans->dropped_roots);
290         mutex_init(&cur_trans->cache_write_mutex);
291         cur_trans->num_dirty_bgs = 0;
292         spin_lock_init(&cur_trans->dirty_bgs_lock);
293         INIT_LIST_HEAD(&cur_trans->deleted_bgs);
294         spin_lock_init(&cur_trans->dropped_roots_lock);
295         list_add_tail(&cur_trans->list, &fs_info->trans_list);
296         extent_io_tree_init(&cur_trans->dirty_pages,
297                              fs_info->btree_inode->i_mapping);
298         fs_info->generation++;
299         cur_trans->transid = fs_info->generation;
300         fs_info->running_transaction = cur_trans;
301         cur_trans->aborted = 0;
302         spin_unlock(&fs_info->trans_lock);
303
304         return 0;
305 }
306
307 /*
308  * this does all the record keeping required to make sure that a reference
309  * counted root is properly recorded in a given transaction.  This is required
310  * to make sure the old root from before we joined the transaction is deleted
311  * when the transaction commits
312  */
313 static int record_root_in_trans(struct btrfs_trans_handle *trans,
314                                struct btrfs_root *root,
315                                int force)
316 {
317         if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
318             root->last_trans < trans->transid) || force) {
319                 WARN_ON(root == root->fs_info->extent_root);
320                 WARN_ON(root->commit_root != root->node);
321
322                 /*
323                  * see below for IN_TRANS_SETUP usage rules
324                  * we have the reloc mutex held now, so there
325                  * is only one writer in this function
326                  */
327                 set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
328
329                 /* make sure readers find IN_TRANS_SETUP before
330                  * they find our root->last_trans update
331                  */
332                 smp_wmb();
333
334                 spin_lock(&root->fs_info->fs_roots_radix_lock);
335                 if (root->last_trans == trans->transid && !force) {
336                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
337                         return 0;
338                 }
339                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
340                            (unsigned long)root->root_key.objectid,
341                            BTRFS_ROOT_TRANS_TAG);
342                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
343                 root->last_trans = trans->transid;
344
345                 /* this is pretty tricky.  We don't want to
346                  * take the relocation lock in btrfs_record_root_in_trans
347                  * unless we're really doing the first setup for this root in
348                  * this transaction.
349                  *
350                  * Normally we'd use root->last_trans as a flag to decide
351                  * if we want to take the expensive mutex.
352                  *
353                  * But, we have to set root->last_trans before we
354                  * init the relocation root, otherwise, we trip over warnings
355                  * in ctree.c.  The solution used here is to flag ourselves
356                  * with root IN_TRANS_SETUP.  When this is 1, we're still
357                  * fixing up the reloc trees and everyone must wait.
358                  *
359                  * When this is zero, they can trust root->last_trans and fly
360                  * through btrfs_record_root_in_trans without having to take the
361                  * lock.  smp_wmb() makes sure that all the writes above are
362                  * done before we pop in the zero below
363                  */
364                 btrfs_init_reloc_root(trans, root);
365                 smp_mb__before_atomic();
366                 clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
367         }
368         return 0;
369 }
370
371
372 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
373                             struct btrfs_root *root)
374 {
375         struct btrfs_transaction *cur_trans = trans->transaction;
376
377         /* Add ourselves to the transaction dropped list */
378         spin_lock(&cur_trans->dropped_roots_lock);
379         list_add_tail(&root->root_list, &cur_trans->dropped_roots);
380         spin_unlock(&cur_trans->dropped_roots_lock);
381
382         /* Make sure we don't try to update the root at commit time */
383         spin_lock(&root->fs_info->fs_roots_radix_lock);
384         radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
385                              (unsigned long)root->root_key.objectid,
386                              BTRFS_ROOT_TRANS_TAG);
387         spin_unlock(&root->fs_info->fs_roots_radix_lock);
388 }
389
390 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
391                                struct btrfs_root *root)
392 {
393         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
394                 return 0;
395
396         /*
397          * see record_root_in_trans for comments about IN_TRANS_SETUP usage
398          * and barriers
399          */
400         smp_rmb();
401         if (root->last_trans == trans->transid &&
402             !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
403                 return 0;
404
405         mutex_lock(&root->fs_info->reloc_mutex);
406         record_root_in_trans(trans, root, 0);
407         mutex_unlock(&root->fs_info->reloc_mutex);
408
409         return 0;
410 }
411
412 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
413 {
414         return (trans->state >= TRANS_STATE_BLOCKED &&
415                 trans->state < TRANS_STATE_UNBLOCKED &&
416                 !trans->aborted);
417 }
418
419 /* wait for commit against the current transaction to become unblocked.
420  * When this is done, it is safe to start a new transaction, but the current
421  * transaction might not be fully on disk.
422  */
423 static void wait_current_trans(struct btrfs_root *root)
424 {
425         struct btrfs_transaction *cur_trans;
426
427         spin_lock(&root->fs_info->trans_lock);
428         cur_trans = root->fs_info->running_transaction;
429         if (cur_trans && is_transaction_blocked(cur_trans)) {
430                 atomic_inc(&cur_trans->use_count);
431                 spin_unlock(&root->fs_info->trans_lock);
432
433                 wait_event(root->fs_info->transaction_wait,
434                            cur_trans->state >= TRANS_STATE_UNBLOCKED ||
435                            cur_trans->aborted);
436                 btrfs_put_transaction(cur_trans);
437         } else {
438                 spin_unlock(&root->fs_info->trans_lock);
439         }
440 }
441
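/*
 * Decide whether a transaction start of the given type should wait for a
 * blocked transaction to commit first: never during log replay, always for
 * userspace (ioctl) transactions, and for TRANS_START only when no
 * ioctl-started transaction is currently open.
 */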
442 static int may_wait_transaction(struct btrfs_root *root, int type)
443 {
444         if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
445                 return 0;
446
447         if (type == TRANS_USERSPACE)
448                 return 1;
449
450         if (type == TRANS_START &&
451             !atomic_read(&root->fs_info->open_ioctl_trans))
452                 return 1;
453
454         return 0;
455 }
456
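/*
 * Relocation may need to create a relocation root for this root during the
 * transaction, so reserve space for it: only when relocation is running, the
 * root is reference counted, it is not the reloc tree itself and it does not
 * already have a reloc root.
 */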
457 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
458 {
459         if (!root->fs_info->reloc_ctl ||
460             !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
461             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
462             root->reloc_root)
463                 return false;
464
465         return true;
466 }
467
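/*
 * Common entry point for starting or joining a transaction.  Reserves
 * metadata space for num_items items (plus one tree node when a relocation
 * root may need to be created), joins or creates the running transaction and
 * returns a handle.  Nested starts from the same task reuse the handle stored
 * in current->journal_info.
 */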
468 static struct btrfs_trans_handle *
469 start_transaction(struct btrfs_root *root, unsigned int num_items,
470                   unsigned int type, enum btrfs_reserve_flush_enum flush)
471 {
472         struct btrfs_trans_handle *h;
473         struct btrfs_transaction *cur_trans;
474         u64 num_bytes = 0;
475         u64 qgroup_reserved = 0;
476         bool reloc_reserved = false;
477         int ret;
478
479         /* Send isn't supposed to start transactions. */
480         ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
481
482         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
483                 return ERR_PTR(-EROFS);
484
485         if (current->journal_info) {
486                 WARN_ON(type & TRANS_EXTWRITERS);
487                 h = current->journal_info;
488                 h->use_count++;
489                 WARN_ON(h->use_count > 2);
490                 h->orig_rsv = h->block_rsv;
491                 h->block_rsv = NULL;
492                 goto got_it;
493         }
494
495         /*
496          * Do the reservation before we join the transaction so we can do all
497          * the appropriate flushing if need be.
498          */
499         if (num_items > 0 && root != root->fs_info->chunk_root) {
500                 qgroup_reserved = num_items * root->nodesize;
501                 ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
502                 if (ret)
503                         return ERR_PTR(ret);
504
505                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
506                 /*
507                  * Do the reservation for the relocation root creation
508                  */
509                 if (need_reserve_reloc_root(root)) {
510                         num_bytes += root->nodesize;
511                         reloc_reserved = true;
512                 }
513
514                 ret = btrfs_block_rsv_add(root,
515                                           &root->fs_info->trans_block_rsv,
516                                           num_bytes, flush);
517                 if (ret)
518                         goto reserve_fail;
519         }
520 again:
521         h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
522         if (!h) {
523                 ret = -ENOMEM;
524                 goto alloc_fail;
525         }
526
527         /*
528          * If we are JOIN_NOLOCK we're already committing a transaction and
529          * waiting on this guy, so we don't need to do the sb_start_intwrite
530          * because we're already holding a ref.  We need this because we could
531  * have raced in and done an fsync() on a file which can kick a commit
532          * and then we deadlock with somebody doing a freeze.
533          *
534          * If we are ATTACH, it means we just want to catch the current
535          * transaction and commit it, so we needn't do sb_start_intwrite(). 
536          */
537         if (type & __TRANS_FREEZABLE)
538                 sb_start_intwrite(root->fs_info->sb);
539
540         if (may_wait_transaction(root, type))
541                 wait_current_trans(root);
542
543         do {
544                 ret = join_transaction(root, type);
545                 if (ret == -EBUSY) {
546                         wait_current_trans(root);
547                         if (unlikely(type == TRANS_ATTACH))
548                                 ret = -ENOENT;
549                 }
550         } while (ret == -EBUSY);
551
552         if (ret < 0)
553                 goto join_fail;
554
555         cur_trans = root->fs_info->running_transaction;
556
557         h->transid = cur_trans->transid;
558         h->transaction = cur_trans;
559         h->root = root;
560         h->use_count = 1;
561         h->fs_info = root->fs_info;
562
563         h->type = type;
564         h->can_flush_pending_bgs = true;
565         INIT_LIST_HEAD(&h->qgroup_ref_list);
566         INIT_LIST_HEAD(&h->new_bgs);
567
568         smp_mb();
569         if (cur_trans->state >= TRANS_STATE_BLOCKED &&
570             may_wait_transaction(root, type)) {
571                 current->journal_info = h;
572                 btrfs_commit_transaction(h, root);
573                 goto again;
574         }
575
576         if (num_bytes) {
577                 trace_btrfs_space_reservation(root->fs_info, "transaction",
578                                               h->transid, num_bytes, 1);
579                 h->block_rsv = &root->fs_info->trans_block_rsv;
580                 h->bytes_reserved = num_bytes;
581                 h->reloc_reserved = reloc_reserved;
582         }
583
584 got_it:
585         btrfs_record_root_in_trans(h, root);
586
587         if (!current->journal_info && type != TRANS_USERSPACE)
588                 current->journal_info = h;
589         return h;
590
591 join_fail:
592         if (type & __TRANS_FREEZABLE)
593                 sb_end_intwrite(root->fs_info->sb);
594         kmem_cache_free(btrfs_trans_handle_cachep, h);
595 alloc_fail:
596         if (num_bytes)
597                 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
598                                         num_bytes);
599 reserve_fail:
600         btrfs_qgroup_free_meta(root, qgroup_reserved);
601         return ERR_PTR(ret);
602 }
603
604 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
605                                                    unsigned int num_items)
606 {
607         return start_transaction(root, num_items, TRANS_START,
608                                  BTRFS_RESERVE_FLUSH_ALL);
609 }
610 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
611                                         struct btrfs_root *root,
612                                         unsigned int num_items,
613                                         int min_factor)
614 {
615         struct btrfs_trans_handle *trans;
616         u64 num_bytes;
617         int ret;
618
619         trans = btrfs_start_transaction(root, num_items);
620         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
621                 return trans;
622
623         trans = btrfs_start_transaction(root, 0);
624         if (IS_ERR(trans))
625                 return trans;
626
627         num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
628         ret = btrfs_cond_migrate_bytes(root->fs_info,
629                                        &root->fs_info->trans_block_rsv,
630                                        num_bytes,
631                                        min_factor);
632         if (ret) {
633                 btrfs_end_transaction(trans, root);
634                 return ERR_PTR(ret);
635         }
636
637         trans->block_rsv = &root->fs_info->trans_block_rsv;
638         trans->bytes_reserved = num_bytes;
639         trace_btrfs_space_reservation(root->fs_info, "transaction",
640                                       trans->transid, num_bytes, 1);
641
642         return trans;
643 }
644
645 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
646                                         struct btrfs_root *root,
647                                         unsigned int num_items)
648 {
649         return start_transaction(root, num_items, TRANS_START,
650                                  BTRFS_RESERVE_FLUSH_LIMIT);
651 }
652
653 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
654 {
655         return start_transaction(root, 0, TRANS_JOIN,
656                                  BTRFS_RESERVE_NO_FLUSH);
657 }
658
659 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
660 {
661         return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
662                                  BTRFS_RESERVE_NO_FLUSH);
663 }
664
665 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
666 {
667         return start_transaction(root, 0, TRANS_USERSPACE,
668                                  BTRFS_RESERVE_NO_FLUSH);
669 }
670
671 /*
672  * btrfs_attach_transaction() - catch the running transaction
673  *
674  * It is used when we want to commit the current transaction, but
675  * don't want to start a new one.
676  *
677  * Note: If this function returns -ENOENT, it just means there is no
678  * running transaction. But it is possible that the inactive transaction
679  * is still in memory, not fully on disk. If you want to be sure there is no
680  * inactive transaction in the fs when -ENOENT is returned, you should
681  * invoke
682  *     btrfs_attach_transaction_barrier()
683  */
684 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
685 {
686         return start_transaction(root, 0, TRANS_ATTACH,
687                                  BTRFS_RESERVE_NO_FLUSH);
688 }
689
690 /*
691  * btrfs_attach_transaction_barrier() - catch the running transaction
692  *
693  * It is similar to the above function, the difference is that this one
694  * will wait for all the inactive transactions until they fully
695  * complete.
696  */
697 struct btrfs_trans_handle *
698 btrfs_attach_transaction_barrier(struct btrfs_root *root)
699 {
700         struct btrfs_trans_handle *trans;
701
702         trans = start_transaction(root, 0, TRANS_ATTACH,
703                                   BTRFS_RESERVE_NO_FLUSH);
704         if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
705                 btrfs_wait_for_commit(root, 0);
706
707         return trans;
708 }
709
710 /* wait for a transaction commit to be fully complete */
711 static noinline void wait_for_commit(struct btrfs_root *root,
712                                     struct btrfs_transaction *commit)
713 {
714         wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
715 }
716
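/*
 * Wait for the commit of the transaction with the given transid to finish.
 * With transid == 0, wait for the newest transaction that is currently
 * committing, if any.  Returns -EINVAL if the requested transid is newer
 * than anything that has been started.
 */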
717 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
718 {
719         struct btrfs_transaction *cur_trans = NULL, *t;
720         int ret = 0;
721
722         if (transid) {
723                 if (transid <= root->fs_info->last_trans_committed)
724                         goto out;
725
726                 /* find specified transaction */
727                 spin_lock(&root->fs_info->trans_lock);
728                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
729                         if (t->transid == transid) {
730                                 cur_trans = t;
731                                 atomic_inc(&cur_trans->use_count);
732                                 ret = 0;
733                                 break;
734                         }
735                         if (t->transid > transid) {
736                                 ret = 0;
737                                 break;
738                         }
739                 }
740                 spin_unlock(&root->fs_info->trans_lock);
741
742                 /*
743                  * The specified transaction doesn't exist, or we
744                  * raced with btrfs_commit_transaction
745                  */
746                 if (!cur_trans) {
747                         if (transid > root->fs_info->last_trans_committed)
748                                 ret = -EINVAL;
749                         goto out;
750                 }
751         } else {
752                 /* find newest transaction that is committing | committed */
753                 spin_lock(&root->fs_info->trans_lock);
754                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
755                                             list) {
756                         if (t->state >= TRANS_STATE_COMMIT_START) {
757                                 if (t->state == TRANS_STATE_COMPLETED)
758                                         break;
759                                 cur_trans = t;
760                                 atomic_inc(&cur_trans->use_count);
761                                 break;
762                         }
763                 }
764                 spin_unlock(&root->fs_info->trans_lock);
765                 if (!cur_trans)
766                         goto out;  /* nothing committing|committed */
767         }
768
769         wait_for_commit(root, cur_trans);
770         btrfs_put_transaction(cur_trans);
771 out:
772         return ret;
773 }
774
775 void btrfs_throttle(struct btrfs_root *root)
776 {
777         if (!atomic_read(&root->fs_info->open_ioctl_trans))
778                 wait_current_trans(root);
779 }
780
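/*
 * Return nonzero when the caller should end its handle: either the space
 * info backing the global reserve is full and the accumulated delayed refs
 * need room, or the global reserve has dropped below half of its target size.
 */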
781 static int should_end_transaction(struct btrfs_trans_handle *trans,
782                                   struct btrfs_root *root)
783 {
784         if (root->fs_info->global_block_rsv.space_info->full &&
785             btrfs_check_space_for_delayed_refs(trans, root))
786                 return 1;
787
788         return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
789 }
790
791 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
792                                  struct btrfs_root *root)
793 {
794         struct btrfs_transaction *cur_trans = trans->transaction;
795         int updates;
796         int err;
797
798         smp_mb();
799         if (cur_trans->state >= TRANS_STATE_BLOCKED ||
800             cur_trans->delayed_refs.flushing)
801                 return 1;
802
803         updates = trans->delayed_ref_updates;
804         trans->delayed_ref_updates = 0;
805         if (updates) {
806                 err = btrfs_run_delayed_refs(trans, root, updates * 2);
807                 if (err) /* Error code will also eval true */
808                         return err;
809         }
810
811         return should_end_transaction(trans, root);
812 }
813
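/*
 * Drop one use of the handle.  On the final put this releases the metadata
 * reservation, may run or schedule delayed refs, can flip the transaction to
 * BLOCKED (or commit it when throttling), and drops the writer counts before
 * waking anyone waiting in the commit path.
 */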
814 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
815                           struct btrfs_root *root, int throttle)
816 {
817         struct btrfs_transaction *cur_trans = trans->transaction;
818         struct btrfs_fs_info *info = root->fs_info;
819         u64 transid = trans->transid;
820         unsigned long cur = trans->delayed_ref_updates;
821         int lock = (trans->type != TRANS_JOIN_NOLOCK);
822         int err = 0;
823         int must_run_delayed_refs = 0;
824
825         if (trans->use_count > 1) {
826                 trans->use_count--;
827                 trans->block_rsv = trans->orig_rsv;
828                 return 0;
829         }
830
831         btrfs_trans_release_metadata(trans, root);
832         trans->block_rsv = NULL;
833
834         if (!list_empty(&trans->new_bgs))
835                 btrfs_create_pending_block_groups(trans, root);
836
837         trans->delayed_ref_updates = 0;
838         if (!trans->sync) {
839                 must_run_delayed_refs =
840                         btrfs_should_throttle_delayed_refs(trans, root);
841                 cur = max_t(unsigned long, cur, 32);
842
843                 /*
844                  * don't make the caller wait if they are from a NOLOCK
845                  * or ATTACH transaction, it will deadlock with commit
846                  */
847                 if (must_run_delayed_refs == 1 &&
848                     (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
849                         must_run_delayed_refs = 2;
850         }
851
852         btrfs_trans_release_metadata(trans, root);
853         trans->block_rsv = NULL;
854
855         if (!list_empty(&trans->new_bgs))
856                 btrfs_create_pending_block_groups(trans, root);
857
858         btrfs_trans_release_chunk_metadata(trans);
859
860         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
861             should_end_transaction(trans, root) &&
862             ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
863                 spin_lock(&info->trans_lock);
864                 if (cur_trans->state == TRANS_STATE_RUNNING)
865                         cur_trans->state = TRANS_STATE_BLOCKED;
866                 spin_unlock(&info->trans_lock);
867         }
868
869         if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
870                 if (throttle)
871                         return btrfs_commit_transaction(trans, root);
872                 else
873                         wake_up_process(info->transaction_kthread);
874         }
875
876         if (trans->type & __TRANS_FREEZABLE)
877                 sb_end_intwrite(root->fs_info->sb);
878
879         WARN_ON(cur_trans != info->running_transaction);
880         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
881         atomic_dec(&cur_trans->num_writers);
882         extwriter_counter_dec(cur_trans, trans->type);
883
884         /*
885          * Make sure counter is updated before we wake up waiters.
886          */
887         smp_mb();
888         if (waitqueue_active(&cur_trans->writer_wait))
889                 wake_up(&cur_trans->writer_wait);
890         btrfs_put_transaction(cur_trans);
891
892         if (current->journal_info == trans)
893                 current->journal_info = NULL;
894
895         if (throttle)
896                 btrfs_run_delayed_iputs(root);
897
898         if (trans->aborted ||
899             test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
900                 wake_up_process(info->transaction_kthread);
901                 err = -EIO;
902         }
903         assert_qgroups_uptodate(trans);
904
905         kmem_cache_free(btrfs_trans_handle_cachep, trans);
906         if (must_run_delayed_refs) {
907                 btrfs_async_run_delayed_refs(root, cur, transid,
908                                              must_run_delayed_refs == 1);
909         }
910         return err;
911 }
912
913 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
914                           struct btrfs_root *root)
915 {
916         return __btrfs_end_transaction(trans, root, 0);
917 }
918
919 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
920                                    struct btrfs_root *root)
921 {
922         return __btrfs_end_transaction(trans, root, 1);
923 }
924
925 /*
926  * when btree blocks are allocated, they have some corresponding bits set for
927  * them in one of two extent_io trees.  This is used to make sure all of
928  * those extents are sent to disk but does not wait on them
929  */
930 int btrfs_write_marked_extents(struct btrfs_root *root,
931                                struct extent_io_tree *dirty_pages, int mark)
932 {
933         int err = 0;
934         int werr = 0;
935         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
936         struct extent_state *cached_state = NULL;
937         u64 start = 0;
938         u64 end;
939
940         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
941                                       mark, &cached_state)) {
942                 bool wait_writeback = false;
943
944                 err = convert_extent_bit(dirty_pages, start, end,
945                                          EXTENT_NEED_WAIT,
946                                          mark, &cached_state);
947                 /*
948                  * convert_extent_bit can return -ENOMEM, which is most of the
949                  * time a temporary error. So when it happens, ignore the error
950                  * and wait for writeback of this range to finish - because we
951                  * failed to set the bit EXTENT_NEED_WAIT for the range, a call
952                  * to btrfs_wait_marked_extents() would not know that writeback
953                  * for this range started and therefore wouldn't wait for it to
954                  * finish - we don't want to commit a superblock that points to
955                  * btree nodes/leafs for which writeback hasn't finished yet
956                  * (and without errors).
957                  * We cleanup any entries left in the io tree when committing
958                  * the transaction (through clear_btree_io_tree()).
959                  */
960                 if (err == -ENOMEM) {
961                         err = 0;
962                         wait_writeback = true;
963                 }
964                 if (!err)
965                         err = filemap_fdatawrite_range(mapping, start, end);
966                 if (err)
967                         werr = err;
968                 else if (wait_writeback)
969                         werr = filemap_fdatawait_range(mapping, start, end);
970                 free_extent_state(cached_state);
971                 cached_state = NULL;
972                 cond_resched();
973                 start = end + 1;
974         }
975         return werr;
976 }
977
978 /*
979  * when btree blocks are allocated, they have some corresponding bits set for
980  * them in one of two extent_io trees.  This is used to make sure all of
981  * those extents are on disk for transaction or log commit.  We wait
982  * on all the pages and clear them from the dirty pages state tree
983  */
984 int btrfs_wait_marked_extents(struct btrfs_root *root,
985                               struct extent_io_tree *dirty_pages, int mark)
986 {
987         int err = 0;
988         int werr = 0;
989         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
990         struct extent_state *cached_state = NULL;
991         u64 start = 0;
992         u64 end;
993         bool errors = false;
994
995         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
996                                       EXTENT_NEED_WAIT, &cached_state)) {
997                 /*
998                  * Ignore -ENOMEM errors returned by clear_extent_bit().
999                  * When committing the transaction, we'll remove any entries
1000                  * left in the io tree. For a log commit, we don't remove them
1001                  * after committing the log because the tree can be accessed
1002                  * concurrently - we do it only at transaction commit time when
1003                  * it's safe to do it (through clear_btree_io_tree()).
1004                  */
1005                 err = clear_extent_bit(dirty_pages, start, end,
1006                                        EXTENT_NEED_WAIT,
1007                                        0, 0, &cached_state, GFP_NOFS);
1008                 if (err == -ENOMEM)
1009                         err = 0;
1010                 if (!err)
1011                         err = filemap_fdatawait_range(mapping, start, end);
1012                 if (err)
1013                         werr = err;
1014                 free_extent_state(cached_state);
1015                 cached_state = NULL;
1016                 cond_resched();
1017                 start = end + 1;
1018         }
1019         if (err)
1020                 werr = err;
1021
1022         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
1023                 if ((mark & EXTENT_DIRTY) &&
1024                     test_and_clear_bit(BTRFS_FS_LOG1_ERR,
1025                                        &root->fs_info->flags))
1026                         errors = true;
1027
1028                 if ((mark & EXTENT_NEW) &&
1029                     test_and_clear_bit(BTRFS_FS_LOG2_ERR,
1030                                        &root->fs_info->flags))
1031                         errors = true;
1032         } else {
1033                 if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
1034                                        &root->fs_info->flags))
1035                         errors = true;
1036         }
1037
1038         if (errors && !werr)
1039                 werr = -EIO;
1040
1041         return werr;
1042 }
1043
1044 /*
1045  * when btree blocks are allocated, they have some corresponding bits set for
1046  * them in one of two extent_io trees.  This is used to make sure all of
1047  * those extents are on disk for transaction or log commit
1048  */
1049 static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
1050                                 struct extent_io_tree *dirty_pages, int mark)
1051 {
1052         int ret;
1053         int ret2;
1054         struct blk_plug plug;
1055
1056         blk_start_plug(&plug);
1057         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
1058         blk_finish_plug(&plug);
1059         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
1060
1061         if (ret)
1062                 return ret;
1063         if (ret2)
1064                 return ret2;
1065         return 0;
1066 }
1067
1068 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
1069                                      struct btrfs_root *root)
1070 {
1071         int ret;
1072
1073         ret = btrfs_write_and_wait_marked_extents(root,
1074                                            &trans->transaction->dirty_pages,
1075                                            EXTENT_DIRTY);
1076         clear_btree_io_tree(&trans->transaction->dirty_pages);
1077
1078         return ret;
1079 }
1080
1081 /*
1082  * this is used to update the root pointer in the tree of tree roots.
1083  *
1084  * But, in the case of the extent allocation tree, updating the root
1085  * pointer may allocate blocks which may change the root of the extent
1086  * allocation tree.
1087  *
1088  * So, this loops and repeats and makes sure the cowonly root didn't
1089  * change while the root pointer was being updated in the metadata.
1090  */
1091 static int update_cowonly_root(struct btrfs_trans_handle *trans,
1092                                struct btrfs_root *root)
1093 {
1094         int ret;
1095         u64 old_root_bytenr;
1096         u64 old_root_used;
1097         struct btrfs_root *tree_root = root->fs_info->tree_root;
1098
1099         old_root_used = btrfs_root_used(&root->root_item);
1100
1101         while (1) {
1102                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1103                 if (old_root_bytenr == root->node->start &&
1104                     old_root_used == btrfs_root_used(&root->root_item))
1105                         break;
1106
1107                 btrfs_set_root_node(&root->root_item, root->node);
1108                 ret = btrfs_update_root(trans, tree_root,
1109                                         &root->root_key,
1110                                         &root->root_item);
1111                 if (ret)
1112                         return ret;
1113
1114                 old_root_used = btrfs_root_used(&root->root_item);
1115         }
1116
1117         return 0;
1118 }
1119
1120 /*
1121  * update all the cowonly tree roots on disk
1122  *
1123  * The error handling in this function may not be obvious. Any of the
1124  * failures will cause the file system to go offline. We still need
1125  * to clean up the delayed refs.
1126  */
1127 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1128                                          struct btrfs_root *root)
1129 {
1130         struct btrfs_fs_info *fs_info = root->fs_info;
1131         struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1132         struct list_head *io_bgs = &trans->transaction->io_bgs;
1133         struct list_head *next;
1134         struct extent_buffer *eb;
1135         int ret;
1136
1137         eb = btrfs_lock_root_node(fs_info->tree_root);
1138         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1139                               0, &eb);
1140         btrfs_tree_unlock(eb);
1141         free_extent_buffer(eb);
1142
1143         if (ret)
1144                 return ret;
1145
1146         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1147         if (ret)
1148                 return ret;
1149
1150         ret = btrfs_run_dev_stats(trans, root->fs_info);
1151         if (ret)
1152                 return ret;
1153         ret = btrfs_run_dev_replace(trans, root->fs_info);
1154         if (ret)
1155                 return ret;
1156         ret = btrfs_run_qgroups(trans, root->fs_info);
1157         if (ret)
1158                 return ret;
1159
1160         ret = btrfs_setup_space_cache(trans, root);
1161         if (ret)
1162                 return ret;
1163
1164         /* run_qgroups might have added some more refs */
1165         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1166         if (ret)
1167                 return ret;
1168 again:
1169         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1170                 next = fs_info->dirty_cowonly_roots.next;
1171                 list_del_init(next);
1172                 root = list_entry(next, struct btrfs_root, dirty_list);
1173                 clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1174
1175                 if (root != fs_info->extent_root)
1176                         list_add_tail(&root->dirty_list,
1177                                       &trans->transaction->switch_commits);
1178                 ret = update_cowonly_root(trans, root);
1179                 if (ret)
1180                         return ret;
1181                 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1182                 if (ret)
1183                         return ret;
1184         }
1185
1186         while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1187                 ret = btrfs_write_dirty_block_groups(trans, root);
1188                 if (ret)
1189                         return ret;
1190                 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1191                 if (ret)
1192                         return ret;
1193         }
1194
1195         if (!list_empty(&fs_info->dirty_cowonly_roots))
1196                 goto again;
1197
1198         list_add_tail(&fs_info->extent_root->dirty_list,
1199                       &trans->transaction->switch_commits);
1200         btrfs_after_dev_replace_commit(fs_info);
1201
1202         return 0;
1203 }
1204
1205 /*
1206  * dead roots are old snapshots that need to be deleted.  This allocates
1207  * a dirty root struct and adds it into the list of dead roots that need to
1208  * be deleted
1209  */
1210 void btrfs_add_dead_root(struct btrfs_root *root)
1211 {
1212         spin_lock(&root->fs_info->trans_lock);
1213         if (list_empty(&root->root_list))
1214                 list_add_tail(&root->root_list, &root->fs_info->dead_roots);
1215         spin_unlock(&root->fs_info->trans_lock);
1216 }
1217
1218 /*
1219  * write all the dirty fs/subvolume tree roots into the tree of tree roots
1220  */
1221 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
1222                                     struct btrfs_root *root)
1223 {
1224         struct btrfs_root *gang[8];
1225         struct btrfs_fs_info *fs_info = root->fs_info;
1226         int i;
1227         int ret;
1228         int err = 0;
1229
1230         spin_lock(&fs_info->fs_roots_radix_lock);
1231         while (1) {
1232                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1233                                                  (void **)gang, 0,
1234                                                  ARRAY_SIZE(gang),
1235                                                  BTRFS_ROOT_TRANS_TAG);
1236                 if (ret == 0)
1237                         break;
1238                 for (i = 0; i < ret; i++) {
1239                         root = gang[i];
1240                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
1241                                         (unsigned long)root->root_key.objectid,
1242                                         BTRFS_ROOT_TRANS_TAG);
1243                         spin_unlock(&fs_info->fs_roots_radix_lock);
1244
1245                         btrfs_free_log(trans, root);
1246                         btrfs_update_reloc_root(trans, root);
1247                         btrfs_orphan_commit_root(trans, root);
1248
1249                         btrfs_save_ino_cache(root, trans);
1250
1251                         /* see comments in should_cow_block() */
1252                         clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1253                         smp_mb__after_atomic();
1254
1255                         if (root->commit_root != root->node) {
1256                                 list_add_tail(&root->dirty_list,
1257                                         &trans->transaction->switch_commits);
1258                                 btrfs_set_root_node(&root->root_item,
1259                                                     root->node);
1260                         }
1261
1262                         err = btrfs_update_root(trans, fs_info->tree_root,
1263                                                 &root->root_key,
1264                                                 &root->root_item);
1265                         spin_lock(&fs_info->fs_roots_radix_lock);
1266                         if (err)
1267                                 break;
1268                         btrfs_qgroup_free_meta_all(root);
1269                 }
1270         }
1271         spin_unlock(&fs_info->fs_roots_radix_lock);
1272         return err;
1273 }
1274
1275 /*
1276  * defrag a given btree.
1277  * Every leaf in the btree is read and defragged.
1278  */
1279 int btrfs_defrag_root(struct btrfs_root *root)
1280 {
1281         struct btrfs_fs_info *info = root->fs_info;
1282         struct btrfs_trans_handle *trans;
1283         int ret;
1284
1285         if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1286                 return 0;
1287
1288         while (1) {
1289                 trans = btrfs_start_transaction(root, 0);
1290                 if (IS_ERR(trans)) {
1291                         ret = PTR_ERR(trans);
1292                         break;
1293                 }
1294
1295                 ret = btrfs_defrag_leaves(trans, root);
1296
1297                 btrfs_end_transaction(trans, root);
1298                 btrfs_btree_balance_dirty(info->tree_root);
1299                 cond_resched();
1300
1301                 if (btrfs_fs_closing(info) || ret != -EAGAIN)
1302                         break;
1303
1304                 if (btrfs_defrag_cancelled(info)) {
1305                         btrfs_debug(info, "defrag_root cancelled");
1306                         ret = -EAGAIN;
1307                         break;
1308                 }
1309         }
1310         clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1311         return ret;
1312 }
1313
1314 /*
1315  * Do all the special snapshot-related qgroup dirty hacks.
1316  *
1317  * This does all the needed qgroup inheritance and the dirty hack of
1318  * switching commit roots and writing all btree blocks to disk inside
1319  * one transaction, to make qgroup accounting work.
1320  */
1321 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1322                                    struct btrfs_root *src,
1323                                    struct btrfs_root *parent,
1324                                    struct btrfs_qgroup_inherit *inherit,
1325                                    u64 dst_objectid)
1326 {
1327         struct btrfs_fs_info *fs_info = src->fs_info;
1328         int ret;
1329
1330         /*
1331          * Save some performance in the case that qgroups are not
1332          * enabled. If this check races with the ioctl, rescan will
1333          * kick in anyway.
1334          */
1335         mutex_lock(&fs_info->qgroup_ioctl_lock);
1336         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
1337                 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1338                 return 0;
1339         }
1340         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1341
1342         /*
1343          * We are going to commit the transaction, see the comment in
1344          * btrfs_commit_transaction() for the reason we lock tree_log_mutex.
1345          */
1346         mutex_lock(&fs_info->tree_log_mutex);
1347
1348         ret = commit_fs_roots(trans, src);
1349         if (ret)
1350                 goto out;
1351         ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
1352         if (ret < 0)
1353                 goto out;
1354         ret = btrfs_qgroup_account_extents(trans, fs_info);
1355         if (ret < 0)
1356                 goto out;
1357
1358         /* Now qgroups are all updated, we can inherit them into the new qgroup */
1359         ret = btrfs_qgroup_inherit(trans, fs_info,
1360                                    src->root_key.objectid, dst_objectid,
1361                                    inherit);
1362         if (ret < 0)
1363                 goto out;
1364
1365         /*
1366          * Now we do a simplified commit of the transaction, which will:
1367          * 1) commit all subvolume and extent trees
1368          *    To ensure all subvolume and extent trees have a valid
1369          *    commit_root for the accounting in the later insert_dir_item()
1370          * 2) write all btree blocks onto disk
1371          *    This makes sure later btree modifications will be COWed,
1372          *    otherwise commit_root could be polluted and cause wrong qgroup numbers
1373          * In this simplified commit we don't really care about other trees
1374          * like the chunk and root trees, as they won't affect qgroups.
1375          * And we don't write the super, to avoid a half-committed state.
1376          */
1377         ret = commit_cowonly_roots(trans, src);
1378         if (ret)
1379                 goto out;
1380         switch_commit_roots(trans->transaction, fs_info);
1381         ret = btrfs_write_and_wait_transaction(trans, src);
1382         if (ret)
1383                 btrfs_handle_fs_error(fs_info, ret,
1384                         "Error while writing out transaction for qgroup");
1385
1386 out:
1387         mutex_unlock(&fs_info->tree_log_mutex);
1388
1389         /*
1390          * Force the parent root to be updated, as we recorded it before so
1391          * its last_trans == cur_transid.
1392          * Otherwise it won't be committed to disk again after the later
1393          * insert_dir_item().
1394          */
1395         if (!ret)
1396                 record_root_in_trans(trans, parent, 1);
1397         return ret;
1398 }
1399
1400 /*
1401  * New snapshots need to be created at a very specific time in the
1402  * transaction commit.  This does the actual creation.
1403  *
1404  * Note:
1405  * If an error occurs that may affect the commit of the current transaction,
1406  * return the error number.  If an error only affects the creation of the
1407  * pending snapshots, just return 0.
1408  */
1409 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1410                                    struct btrfs_fs_info *fs_info,
1411                                    struct btrfs_pending_snapshot *pending)
1412 {
1413         struct btrfs_key key;
1414         struct btrfs_root_item *new_root_item;
1415         struct btrfs_root *tree_root = fs_info->tree_root;
1416         struct btrfs_root *root = pending->root;
1417         struct btrfs_root *parent_root;
1418         struct btrfs_block_rsv *rsv;
1419         struct inode *parent_inode;
1420         struct btrfs_path *path;
1421         struct btrfs_dir_item *dir_item;
1422         struct dentry *dentry;
1423         struct extent_buffer *tmp;
1424         struct extent_buffer *old;
1425         struct timespec cur_time;
1426         int ret = 0;
1427         u64 to_reserve = 0;
1428         u64 index = 0;
1429         u64 objectid;
1430         u64 root_flags;
1431         uuid_le new_uuid;
1432
1433         ASSERT(pending->path);
1434         path = pending->path;
1435
1436         ASSERT(pending->root_item);
1437         new_root_item = pending->root_item;
1438
1439         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1440         if (pending->error)
1441                 goto no_free_objectid;
1442
1443         /*
1444          * Make the qgroup code skip the new snapshot's qgroupid, as it is
1445          * accounted for by the later btrfs_qgroup_inherit().
1446          */
1447         btrfs_set_skip_qgroup(trans, objectid);
1448
1449         btrfs_reloc_pre_snapshot(pending, &to_reserve);
1450
1451         if (to_reserve > 0) {
1452                 pending->error = btrfs_block_rsv_add(root,
1453                                                      &pending->block_rsv,
1454                                                      to_reserve,
1455                                                      BTRFS_RESERVE_NO_FLUSH);
1456                 if (pending->error)
1457                         goto clear_skip_qgroup;
1458         }
1459
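             /*
              * Key for the new root item.  The offset is filled in with the
              * creating transid just before btrfs_insert_root() below.
              */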
1460         key.objectid = objectid;
1461         key.offset = (u64)-1;
1462         key.type = BTRFS_ROOT_ITEM_KEY;
1463
1464         rsv = trans->block_rsv;
1465         trans->block_rsv = &pending->block_rsv;
1466         trans->bytes_reserved = trans->block_rsv->reserved;
1467         trace_btrfs_space_reservation(root->fs_info, "transaction",
1468                                       trans->transid,
1469                                       trans->bytes_reserved, 1);
1470         dentry = pending->dentry;
1471         parent_inode = pending->dir;
1472         parent_root = BTRFS_I(parent_inode)->root;
1473         record_root_in_trans(trans, parent_root, 0);
1474
1475         cur_time = current_time(parent_inode);
1476
1477         /*
1478          * insert the directory item
1479          */
1480         ret = btrfs_set_inode_index(parent_inode, &index);
1481         BUG_ON(ret); /* -ENOMEM */
1482
1483         /* Check whether a file/dir with the same name already exists. */
1484         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1485                                          btrfs_ino(parent_inode),
1486                                          dentry->d_name.name,
1487                                          dentry->d_name.len, 0);
1488         if (dir_item != NULL && !IS_ERR(dir_item)) {
1489                 pending->error = -EEXIST;
1490                 goto dir_item_existed;
1491         } else if (IS_ERR(dir_item)) {
1492                 ret = PTR_ERR(dir_item);
1493                 btrfs_abort_transaction(trans, ret);
1494                 goto fail;
1495         }
1496         btrfs_release_path(path);
1497
1498         /*
1499          * Pull in the delayed directory update
1500          * and the delayed inode item,
1501          * otherwise we corrupt the FS during
1502          * the snapshot.
1503          */
1504         ret = btrfs_run_delayed_items(trans, root);
1505         if (ret) {      /* Transaction aborted */
1506                 btrfs_abort_transaction(trans, ret);
1507                 goto fail;
1508         }
1509
1510         record_root_in_trans(trans, root, 0);
1511         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1512         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1513         btrfs_check_and_init_root_item(new_root_item);
1514
1515         root_flags = btrfs_root_flags(new_root_item);
1516         if (pending->readonly)
1517                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1518         else
1519                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1520         btrfs_set_root_flags(new_root_item, root_flags);
1521
1522         btrfs_set_root_generation_v2(new_root_item,
1523                         trans->transid);
1524         uuid_le_gen(&new_uuid);
1525         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1526         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1527                         BTRFS_UUID_SIZE);
1528         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1529                 memset(new_root_item->received_uuid, 0,
1530                        sizeof(new_root_item->received_uuid));
1531                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1532                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1533                 btrfs_set_root_stransid(new_root_item, 0);
1534                 btrfs_set_root_rtransid(new_root_item, 0);
1535         }
1536         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1537         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1538         btrfs_set_root_otransid(new_root_item, trans->transid);
1539
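             /*
              * COW the source root's node in this transaction, then copy it
              * as the top node of the new snapshot root via btrfs_copy_root().
              */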
1540         old = btrfs_lock_root_node(root);
1541         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1542         if (ret) {
1543                 btrfs_tree_unlock(old);
1544                 free_extent_buffer(old);
1545                 btrfs_abort_transaction(trans, ret);
1546                 goto fail;
1547         }
1548
1549         btrfs_set_lock_blocking(old);
1550
1551         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1552         /* clean up in any case */
1553         btrfs_tree_unlock(old);
1554         free_extent_buffer(old);
1555         if (ret) {
1556                 btrfs_abort_transaction(trans, ret);
1557                 goto fail;
1558         }
1559         /* see comments in should_cow_block() */
1560         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1561         smp_wmb();
1562
1563         btrfs_set_root_node(new_root_item, tmp);
1564         /* record when the snapshot was created in key.offset */
1565         key.offset = trans->transid;
1566         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1567         btrfs_tree_unlock(tmp);
1568         free_extent_buffer(tmp);
1569         if (ret) {
1570                 btrfs_abort_transaction(trans, ret);
1571                 goto fail;
1572         }
1573
1574         /*
1575          * insert root back/forward references
1576          */
1577         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1578                                  parent_root->root_key.objectid,
1579                                  btrfs_ino(parent_inode), index,
1580                                  dentry->d_name.name, dentry->d_name.len);
1581         if (ret) {
1582                 btrfs_abort_transaction(trans, ret);
1583                 goto fail;
1584         }
1585
1586         key.offset = (u64)-1;
1587         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1588         if (IS_ERR(pending->snap)) {
1589                 ret = PTR_ERR(pending->snap);
1590                 btrfs_abort_transaction(trans, ret);
1591                 goto fail;
1592         }
1593
1594         ret = btrfs_reloc_post_snapshot(trans, pending);
1595         if (ret) {
1596                 btrfs_abort_transaction(trans, ret);
1597                 goto fail;
1598         }
1599
1600         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1601         if (ret) {
1602                 btrfs_abort_transaction(trans, ret);
1603                 goto fail;
1604         }
1605
1606         /*
1607          * Do the special qgroup accounting for the snapshot, as we use a
1608          * qgroup hack to make snapshot creation fast.
1609          * To co-operate with that hack, we do the hack again here; otherwise
1610          * the snapshot would be greatly slowed down by a subtree qgroup rescan.
1611          */
1612         ret = qgroup_account_snapshot(trans, root, parent_root,
1613                                       pending->inherit, objectid);
1614         if (ret < 0)
1615                 goto fail;
1616
1617         ret = btrfs_insert_dir_item(trans, parent_root,
1618                                     dentry->d_name.name, dentry->d_name.len,
1619                                     parent_inode, &key,
1620                                     BTRFS_FT_DIR, index);
1621         /* We checked the name at the beginning, so these errors are impossible. */
1622         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1623         if (ret) {
1624                 btrfs_abort_transaction(trans, ret);
1625                 goto fail;
1626         }
1627
1628         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1629                                          dentry->d_name.len * 2);
1630         parent_inode->i_mtime = parent_inode->i_ctime =
1631                 current_time(parent_inode);
1632         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1633         if (ret) {
1634                 btrfs_abort_transaction(trans, ret);
1635                 goto fail;
1636         }
1637         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
1638                                   BTRFS_UUID_KEY_SUBVOL, objectid);
1639         if (ret) {
1640                 btrfs_abort_transaction(trans, ret);
1641                 goto fail;
1642         }
1643         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1644                 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
1645                                           new_root_item->received_uuid,
1646                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1647                                           objectid);
1648                 if (ret && ret != -EEXIST) {
1649                         btrfs_abort_transaction(trans, ret);
1650                         goto fail;
1651                 }
1652         }
1653
1654         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1655         if (ret) {
1656                 btrfs_abort_transaction(trans, ret);
1657                 goto fail;
1658         }
1659
1660 fail:
1661         pending->error = ret;
1662 dir_item_existed:
1663         trans->block_rsv = rsv;
1664         trans->bytes_reserved = 0;
1665 clear_skip_qgroup:
1666         btrfs_clear_skip_qgroup(trans);
1667 no_free_objectid:
1668         kfree(new_root_item);
1669         pending->root_item = NULL;
1670         btrfs_free_path(path);
1671         pending->path = NULL;
1672
1673         return ret;
1674 }
1675
1676 /*
1677  * create all the snapshots we've scheduled for creation
1678  */
1679 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1680                                              struct btrfs_fs_info *fs_info)
1681 {
1682         struct btrfs_pending_snapshot *pending, *next;
1683         struct list_head *head = &trans->transaction->pending_snapshots;
1684         int ret = 0;
1685
1686         list_for_each_entry_safe(pending, next, head, list) {
1687                 list_del(&pending->list);
1688                 ret = create_pending_snapshot(trans, fs_info, pending);
1689                 if (ret)
1690                         break;
1691         }
1692         return ret;
1693 }
1694
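     /*
      * Copy the bytenr/generation/level of the freshly committed chunk root
      * and tree root into the in-memory superblock, so the super we are
      * about to write points at the new commit roots.
      */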
1695 static void update_super_roots(struct btrfs_root *root)
1696 {
1697         struct btrfs_root_item *root_item;
1698         struct btrfs_super_block *super;
1699
1700         super = root->fs_info->super_copy;
1701
1702         root_item = &root->fs_info->chunk_root->root_item;
1703         super->chunk_root = root_item->bytenr;
1704         super->chunk_root_generation = root_item->generation;
1705         super->chunk_root_level = root_item->level;
1706
1707         root_item = &root->fs_info->tree_root->root_item;
1708         super->root = root_item->bytenr;
1709         super->generation = root_item->generation;
1710         super->root_level = root_item->level;
1711         if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
1712                 super->cache_generation = root_item->generation;
1713         if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
1714                 super->uuid_tree_generation = root_item->generation;
1715 }
1716
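     /*
      * Report whether the currently running transaction has entered its
      * commit phase (TRANS_STATE_COMMIT_START or later).
      */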
1717 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1718 {
1719         struct btrfs_transaction *trans;
1720         int ret = 0;
1721
1722         spin_lock(&info->trans_lock);
1723         trans = info->running_transaction;
1724         if (trans)
1725                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1726         spin_unlock(&info->trans_lock);
1727         return ret;
1728 }
1729
1730 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1731 {
1732         struct btrfs_transaction *trans;
1733         int ret = 0;
1734
1735         spin_lock(&info->trans_lock);
1736         trans = info->running_transaction;
1737         if (trans)
1738                 ret = is_transaction_blocked(trans);
1739         spin_unlock(&info->trans_lock);
1740         return ret;
1741 }
1742
1743 /*
1744  * wait for the current transaction commit to start and block subsequent
1745  * transaction joins
1746  */
1747 static void wait_current_trans_commit_start(struct btrfs_root *root,
1748                                             struct btrfs_transaction *trans)
1749 {
1750         wait_event(root->fs_info->transaction_blocked_wait,
1751                    trans->state >= TRANS_STATE_COMMIT_START ||
1752                    trans->aborted);
1753 }
1754
1755 /*
1756  * wait for the current transaction to start and then become unblocked.
1757  * caller holds ref.
1758  */
1759 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1760                                          struct btrfs_transaction *trans)
1761 {
1762         wait_event(root->fs_info->transaction_wait,
1763                    trans->state >= TRANS_STATE_UNBLOCKED ||
1764                    trans->aborted);
1765 }
1766
1767 /*
1768  * Commit transactions asynchronously.  Once btrfs_commit_transaction_async()
1769  * returns, any subsequent transaction will not be allowed to join.
1770  */
1771 struct btrfs_async_commit {
1772         struct btrfs_trans_handle *newtrans;
1773         struct btrfs_root *root;
1774         struct work_struct work;
1775 };
1776
1777 static void do_async_commit(struct work_struct *work)
1778 {
1779         struct btrfs_async_commit *ac =
1780                 container_of(work, struct btrfs_async_commit, work);
1781
1782         /*
1783          * We've got freeze protection passed with the transaction.
1784          * Tell lockdep about it.
1785          */
1786         if (ac->newtrans->type & __TRANS_FREEZABLE)
1787                 __sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);
1788
1789         current->journal_info = ac->newtrans;
1790
1791         btrfs_commit_transaction(ac->newtrans, ac->root);
1792         kfree(ac);
1793 }
1794
1795 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1796                                    struct btrfs_root *root,
1797                                    int wait_for_unblock)
1798 {
1799         struct btrfs_async_commit *ac;
1800         struct btrfs_transaction *cur_trans;
1801
1802         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1803         if (!ac)
1804                 return -ENOMEM;
1805
1806         INIT_WORK(&ac->work, do_async_commit);
1807         ac->root = root;
1808         ac->newtrans = btrfs_join_transaction(root);
1809         if (IS_ERR(ac->newtrans)) {
1810                 int err = PTR_ERR(ac->newtrans);
1811                 kfree(ac);
1812                 return err;
1813         }
1814
1815         /* take transaction reference */
1816         cur_trans = trans->transaction;
1817         atomic_inc(&cur_trans->use_count);
1818
1819         btrfs_end_transaction(trans, root);
1820
1821         /*
1822          * Tell lockdep we've released the freeze rwsem, since the
1823          * async commit thread will be the one to unlock it.
1824          */
1825         if (ac->newtrans->type & __TRANS_FREEZABLE)
1826                 __sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);
1827
1828         schedule_work(&ac->work);
1829
1830         /* wait for transaction to start and unblock */
1831         if (wait_for_unblock)
1832                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1833         else
1834                 wait_current_trans_commit_start(root, cur_trans);
1835
1836         if (current->journal_info == trans)
1837                 current->journal_info = NULL;
1838
1839         btrfs_put_transaction(cur_trans);
1840         return 0;
1841 }
1842
1843
1844 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1845                                 struct btrfs_root *root, int err)
1846 {
1847         struct btrfs_transaction *cur_trans = trans->transaction;
1848         DEFINE_WAIT(wait);
1849
1850         WARN_ON(trans->use_count > 1);
1851
1852         btrfs_abort_transaction(trans, err);
1853
1854         spin_lock(&root->fs_info->trans_lock);
1855
1856         /*
1857          * If the transaction is removed from the list, it means this
1858          * transaction has been committed successfully, so it is impossible
1859          * to call the cleanup function.
1860          */
1861         BUG_ON(list_empty(&cur_trans->list));
1862
1863         list_del_init(&cur_trans->list);
1864         if (cur_trans == root->fs_info->running_transaction) {
1865                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1866                 spin_unlock(&root->fs_info->trans_lock);
1867                 wait_event(cur_trans->writer_wait,
1868                            atomic_read(&cur_trans->num_writers) == 1);
1869
1870                 spin_lock(&root->fs_info->trans_lock);
1871         }
1872         spin_unlock(&root->fs_info->trans_lock);
1873
1874         btrfs_cleanup_one_transaction(trans->transaction, root);
1875
1876         spin_lock(&root->fs_info->trans_lock);
1877         if (cur_trans == root->fs_info->running_transaction)
1878                 root->fs_info->running_transaction = NULL;
1879         spin_unlock(&root->fs_info->trans_lock);
1880
1881         if (trans->type & __TRANS_FREEZABLE)
1882                 sb_end_intwrite(root->fs_info->sb);
1883         btrfs_put_transaction(cur_trans);
1884         btrfs_put_transaction(cur_trans);
1885
1886         trace_btrfs_transaction_commit(root);
1887
1888         if (current->journal_info == trans)
1889                 current->journal_info = NULL;
1890         btrfs_scrub_cancel(root->fs_info);
1891
1892         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1893 }
1894
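     /*
      * With -o flushoncommit, all dirty data is flushed out and waited on
      * before the commit proceeds; otherwise these two helpers are no-ops.
      */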
1895 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1896 {
1897         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
1898                 return btrfs_start_delalloc_roots(fs_info, 1, -1);
1899         return 0;
1900 }
1901
1902 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1903 {
1904         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
1905                 btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
1906 }
1907
1908 static inline void
1909 btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
1910 {
1911         wait_event(cur_trans->pending_wait,
1912                    atomic_read(&cur_trans->pending_ordered) == 0);
1913 }
1914
1915 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1916                              struct btrfs_root *root)
1917 {
1918         struct btrfs_transaction *cur_trans = trans->transaction;
1919         struct btrfs_transaction *prev_trans = NULL;
1920         int ret;
1921
1922         /*
1923          * Some places just start a transaction to commit it.  We need to make
1924          * sure that if this commit fails, the abort code actually marks the
1925          * transaction as failed, so set trans->dirty to make the abort code do
1926          * the right thing.
1927          */
1928         trans->dirty = true;
1929
1930         /* Stop the commit early if ->aborted is set */
1931         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1932                 ret = cur_trans->aborted;
1933                 btrfs_end_transaction(trans, root);
1934                 return ret;
1935         }
1936
1937         btrfs_trans_release_metadata(trans, root);
1938         trans->block_rsv = NULL;
1939
1940         /* Make a pass through all the delayed refs we have so far;
1941          * any running procs may add more while we are here.
1942          */
1943         ret = btrfs_run_delayed_refs(trans, root, 0);
1944         if (ret) {
1945                 btrfs_end_transaction(trans, root);
1946                 return ret;
1947         }
1948
1949         cur_trans = trans->transaction;
1950
1951         /*
1952          * set the flushing flag so procs in this transaction have to
1953          * start sending their work down.
1954          */
1955         cur_trans->delayed_refs.flushing = 1;
1956         smp_wmb();
1957
1958         if (!list_empty(&trans->new_bgs))
1959                 btrfs_create_pending_block_groups(trans, root);
1960
1961         ret = btrfs_run_delayed_refs(trans, root, 0);
1962         if (ret) {
1963                 btrfs_end_transaction(trans, root);
1964                 return ret;
1965         }
1966
1967         if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
1968                 int run_it = 0;
1969
1970                 /* this mutex is also taken before trying to set
1971                  * block groups readonly.  We need to make sure
1972                  * that nobody has set a block group readonly
1973                  * after extents from that block group have been
1974                  * allocated for cache files.  btrfs_set_block_group_ro
1975                  * will wait for the transaction to commit if it
1976                  * finds BTRFS_TRANS_DIRTY_BG_RUN set.
1977                  *
1978                  * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
1979                  * only one process starts all the block group IO.  It wouldn't
1980                  * hurt to have more than one go through, but there's no
1981                  * real advantage to it either.
1982                  */
1983                 mutex_lock(&root->fs_info->ro_block_group_mutex);
1984                 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
1985                                       &cur_trans->flags))
1986                         run_it = 1;
1987                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
1988
1989                 if (run_it)
1990                         ret = btrfs_start_dirty_block_groups(trans, root);
1991         }
1992         if (ret) {
1993                 btrfs_end_transaction(trans, root);
1994                 return ret;
1995         }
1996
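             /*
              * If a commit of this transaction is already in progress, just
              * attach to it: take an extra reference, end our handle and
              * wait for that commit to finish.
              */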
1997         spin_lock(&root->fs_info->trans_lock);
1998         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1999                 spin_unlock(&root->fs_info->trans_lock);
2000                 atomic_inc(&cur_trans->use_count);
2001                 ret = btrfs_end_transaction(trans, root);
2002
2003                 wait_for_commit(root, cur_trans);
2004
2005                 if (unlikely(cur_trans->aborted))
2006                         ret = cur_trans->aborted;
2007
2008                 btrfs_put_transaction(cur_trans);
2009
2010                 return ret;
2011         }
2012
2013         cur_trans->state = TRANS_STATE_COMMIT_START;
2014         wake_up(&root->fs_info->transaction_blocked_wait);
2015
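             /*
              * Wait for the previous transaction (if it is still around) to
              * finish its commit before we go on, so commits always complete
              * in order.
              */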
2016         if (cur_trans->list.prev != &root->fs_info->trans_list) {
2017                 prev_trans = list_entry(cur_trans->list.prev,
2018                                         struct btrfs_transaction, list);
2019                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
2020                         atomic_inc(&prev_trans->use_count);
2021                         spin_unlock(&root->fs_info->trans_lock);
2022
2023                         wait_for_commit(root, prev_trans);
2024                         ret = prev_trans->aborted;
2025
2026                         btrfs_put_transaction(prev_trans);
2027                         if (ret)
2028                                 goto cleanup_transaction;
2029                 } else {
2030                         spin_unlock(&root->fs_info->trans_lock);
2031                 }
2032         } else {
2033                 spin_unlock(&root->fs_info->trans_lock);
2034         }
2035
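             /*
              * Drop our external writer count, then (below) wait until all
              * remaining external writers have detached from this
              * transaction.
              */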
2036         extwriter_counter_dec(cur_trans, trans->type);
2037
2038         ret = btrfs_start_delalloc_flush(root->fs_info);
2039         if (ret)
2040                 goto cleanup_transaction;
2041
2042         ret = btrfs_run_delayed_items(trans, root);
2043         if (ret)
2044                 goto cleanup_transaction;
2045
2046         wait_event(cur_trans->writer_wait,
2047                    extwriter_counter_read(cur_trans) == 0);
2048
2049         /* Some pending items might have been added after the previous flush. */
2050         ret = btrfs_run_delayed_items(trans, root);
2051         if (ret)
2052                 goto cleanup_transaction;
2053
2054         btrfs_wait_delalloc_flush(root->fs_info);
2055
2056         btrfs_wait_pending_ordered(cur_trans);
2057
2058         btrfs_scrub_pause(root);
2059         /*
2060          * Ok now we need to make sure to block out any other joins while we
2061          * commit the transaction.  We could have started a join before setting
2062          * COMMIT_DOING, so make sure to wait until num_writers == 1 again.
2063          */
2064         spin_lock(&root->fs_info->trans_lock);
2065         cur_trans->state = TRANS_STATE_COMMIT_DOING;
2066         spin_unlock(&root->fs_info->trans_lock);
2067         wait_event(cur_trans->writer_wait,
2068                    atomic_read(&cur_trans->num_writers) == 1);
2069
2070         /* ->aborted might be set after the previous check, so check it */
2071         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
2072                 ret = cur_trans->aborted;
2073                 goto scrub_continue;
2074         }
2075         /*
2076          * the reloc mutex makes sure that we stop
2077          * the balancing code from coming in and moving
2078          * extents around in the middle of the commit
2079          */
2080         mutex_lock(&root->fs_info->reloc_mutex);
2081
2082         /*
2083          * We needn't worry about the delayed items because we will
2084          * deal with them in create_pending_snapshot(), which is the
2085          * core function of the snapshot creation.
2086          */
2087         ret = create_pending_snapshots(trans, root->fs_info);
2088         if (ret) {
2089                 mutex_unlock(&root->fs_info->reloc_mutex);
2090                 goto scrub_continue;
2091         }
2092
2093         /*
2094          * We insert the dir indexes of the snapshots and update the inode
2095          * of the snapshots' parents after the snapshot creation, so there
2096          * are some delayed items which are not dealt with. Now deal with
2097          * them.
2098          *
2099          * We needn't worry that this operation will corrupt the snapshots,
2100          * because all the trees which are snapshotted will be forced to COW
2101          * the nodes and leaves.
2102          */
2103         ret = btrfs_run_delayed_items(trans, root);
2104         if (ret) {
2105                 mutex_unlock(&root->fs_info->reloc_mutex);
2106                 goto scrub_continue;
2107         }
2108
2109         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
2110         if (ret) {
2111                 mutex_unlock(&root->fs_info->reloc_mutex);
2112                 goto scrub_continue;
2113         }
2114
2115         /* Record old roots for later qgroup accounting */
2116         ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
2117         if (ret) {
2118                 mutex_unlock(&root->fs_info->reloc_mutex);
2119                 goto scrub_continue;
2120         }
2121
2122         /*
2123          * make sure none of the code above managed to slip in a
2124          * delayed item
2125          */
2126         btrfs_assert_delayed_root_empty(root);
2127
2128         WARN_ON(cur_trans != trans->transaction);
2129
2130         /* commit_fs_roots() and commit_cowonly_roots() below are responsible
2131          * for getting the various roots consistent with each other.  Every
2132          * pointer in the tree of tree roots has to point to the most up-to-date
2133          * root for every subvolume and other tree.  So, we have to keep
2134          * the tree logging code from jumping in and changing any
2135          * of the trees.
2136          *
2137          * At this point in the commit, there can't be any tree-log
2138          * writers, but a little lower down we drop the trans mutex
2139          * and let new people in.  By holding the tree_log_mutex
2140          * from now until after the super is written, we avoid races
2141          * with the tree-log code.
2142          */
2143         mutex_lock(&root->fs_info->tree_log_mutex);
2144
2145         ret = commit_fs_roots(trans, root);
2146         if (ret) {
2147                 mutex_unlock(&root->fs_info->tree_log_mutex);
2148                 mutex_unlock(&root->fs_info->reloc_mutex);
2149                 goto scrub_continue;
2150         }
2151
2152         /*
2153          * Since the transaction is done, we can apply the pending changes
2154          * before the next transaction.
2155          */
2156         btrfs_apply_pending_changes(root->fs_info);
2157
2158         /* commit_fs_roots() gets rid of all the tree log roots; it is now
2159          * safe to free the log root tree.
2160          */
2161         btrfs_free_log_root_tree(trans, root->fs_info);
2162
2163         /*
2164          * Since the fs roots are all committed, we can get quite accurate
2165          * new_roots.  So let's do the quota accounting.
2166          */
2167         ret = btrfs_qgroup_account_extents(trans, root->fs_info);
2168         if (ret < 0) {
2169                 mutex_unlock(&root->fs_info->tree_log_mutex);
2170                 mutex_unlock(&root->fs_info->reloc_mutex);
2171                 goto scrub_continue;
2172         }
2173
2174         ret = commit_cowonly_roots(trans, root);
2175         if (ret) {
2176                 mutex_unlock(&root->fs_info->tree_log_mutex);
2177                 mutex_unlock(&root->fs_info->reloc_mutex);
2178                 goto scrub_continue;
2179         }
2180
2181         /*
2182          * The tasks which save the space cache and inode cache may also
2183          * update ->aborted, so check it.
2184          */
2185         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
2186                 ret = cur_trans->aborted;
2187                 mutex_unlock(&root->fs_info->tree_log_mutex);
2188                 mutex_unlock(&root->fs_info->reloc_mutex);
2189                 goto scrub_continue;
2190         }
2191
2192         btrfs_prepare_extent_commit(trans, root);
2193
2194         cur_trans = root->fs_info->running_transaction;
2195
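             /*
              * Point the root items of the tree root and chunk root at the
              * nodes we just committed, and queue them so that
              * switch_commit_roots() flips their commit roots.
              */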
2196         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
2197                             root->fs_info->tree_root->node);
2198         list_add_tail(&root->fs_info->tree_root->dirty_list,
2199                       &cur_trans->switch_commits);
2200
2201         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
2202                             root->fs_info->chunk_root->node);
2203         list_add_tail(&root->fs_info->chunk_root->dirty_list,
2204                       &cur_trans->switch_commits);
2205
2206         switch_commit_roots(cur_trans, root->fs_info);
2207
2208         assert_qgroups_uptodate(trans);
2209         ASSERT(list_empty(&cur_trans->dirty_bgs));
2210         ASSERT(list_empty(&cur_trans->io_bgs));
2211         update_super_roots(root);
2212
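             /*
              * Clear the log root in the super: after a clean commit there
              * is no log to replay.  super_for_commit is the copy that is
              * actually written out.
              */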
2213         btrfs_set_super_log_root(root->fs_info->super_copy, 0);
2214         btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
2215         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
2216                sizeof(*root->fs_info->super_copy));
2217
2218         btrfs_update_commit_device_size(root->fs_info);
2219         btrfs_update_commit_device_bytes_used(root, cur_trans);
2220
2221         clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
2222         clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
2223
2224         btrfs_trans_release_chunk_metadata(trans);
2225
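             /*
              * From this point on, new transactions may start while we write
              * this one out, so mark it unblocked and wake the waiters.
              */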
2226         spin_lock(&root->fs_info->trans_lock);
2227         cur_trans->state = TRANS_STATE_UNBLOCKED;
2228         root->fs_info->running_transaction = NULL;
2229         spin_unlock(&root->fs_info->trans_lock);
2230         mutex_unlock(&root->fs_info->reloc_mutex);
2231
2232         wake_up(&root->fs_info->transaction_wait);
2233
2234         ret = btrfs_write_and_wait_transaction(trans, root);
2235         if (ret) {
2236                 btrfs_handle_fs_error(root->fs_info, ret,
2237                             "Error while writing out transaction");
2238                 mutex_unlock(&root->fs_info->tree_log_mutex);
2239                 goto scrub_continue;
2240         }
2241
2242         ret = write_ctree_super(trans, root, 0);
2243         if (ret) {
2244                 mutex_unlock(&root->fs_info->tree_log_mutex);
2245                 goto scrub_continue;
2246         }
2247
2248         /*
2249          * the super is written, we can safely allow the tree-loggers
2250          * to go about their business
2251          */
2252         mutex_unlock(&root->fs_info->tree_log_mutex);
2253
2254         btrfs_finish_extent_commit(trans, root);
2255
2256         if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2257                 btrfs_clear_space_info_full(root->fs_info);
2258
2259         root->fs_info->last_trans_committed = cur_trans->transid;
2260         /*
2261          * We needn't acquire the lock here because there is no other task
2262          * which can change it.
2263          */
2264         cur_trans->state = TRANS_STATE_COMPLETED;
2265         wake_up(&cur_trans->commit_wait);
2266
2267         spin_lock(&root->fs_info->trans_lock);
2268         list_del_init(&cur_trans->list);
2269         spin_unlock(&root->fs_info->trans_lock);
2270
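             /*
              * Two puts: one for the reference this handle held, one for the
              * reference held while the transaction sat on the trans_list.
              */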
2271         btrfs_put_transaction(cur_trans);
2272         btrfs_put_transaction(cur_trans);
2273
2274         if (trans->type & __TRANS_FREEZABLE)
2275                 sb_end_intwrite(root->fs_info->sb);
2276
2277         trace_btrfs_transaction_commit(root);
2278
2279         btrfs_scrub_continue(root);
2280
2281         if (current->journal_info == trans)
2282                 current->journal_info = NULL;
2283
2284         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2285
2286         /*
2287          * If the fs has been frozen, we cannot handle delayed iputs here;
2288          * doing so would deadlock on SB_FREEZE_FS.
2289          */
2290         if (current != root->fs_info->transaction_kthread &&
2291             current != root->fs_info->cleaner_kthread &&
2292             !root->fs_info->fs_frozen)
2293                 btrfs_run_delayed_iputs(root);
2294
2295         return ret;
2296
2297 scrub_continue:
2298         btrfs_scrub_continue(root);
2299 cleanup_transaction:
2300         btrfs_trans_release_metadata(trans, root);
2301         btrfs_trans_release_chunk_metadata(trans);
2302         trans->block_rsv = NULL;
2303         btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
2304         if (current->journal_info == trans)
2305                 current->journal_info = NULL;
2306         cleanup_transaction(trans, root, ret);
2307
2308         return ret;
2309 }
2310
2311 /*
2312  * Return < 0 on error,
2313  * 0 if there are no more dead_roots at the time of the call,
2314  * 1 if there are more to be processed; call me again.
2315  *
2316  * A return of 1 means there are certainly more snapshots to delete, but
2317  * if a new one arrives during processing, 0 may be returned.  We don't mind,
2318  * because btrfs_commit_super() will poke the cleaner thread and it will
2319  * process it a few seconds later.
2320  */
2321 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2322 {
2323         int ret;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325
2326         spin_lock(&fs_info->trans_lock);
2327         if (list_empty(&fs_info->dead_roots)) {
2328                 spin_unlock(&fs_info->trans_lock);
2329                 return 0;
2330         }
2331         root = list_first_entry(&fs_info->dead_roots,
2332                         struct btrfs_root, root_list);
2333         list_del_init(&root->root_list);
2334         spin_unlock(&fs_info->trans_lock);
2335
2336         btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
2337
2338         btrfs_kill_all_delayed_nodes(root);
2339
2340         if (btrfs_header_backref_rev(root->node) <
2341                         BTRFS_MIXED_BACKREF_REV)
2342                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2343         else
2344                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2345
2346         return (ret < 0) ? 0 : 1;
2347 }
2348
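 /*
  * Apply mount-option changes that were queued as BTRFS_PENDING_* bits in
  * fs_info->pending_changes while a transaction was running.
  */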
2349 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2350 {
2351         unsigned long prev;
2352         unsigned long bit;
2353
2354         prev = xchg(&fs_info->pending_changes, 0);
2355         if (!prev)
2356                 return;
2357
2358         bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2359         if (prev & bit)
2360                 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2361         prev &= ~bit;
2362
2363         bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2364         if (prev & bit)
2365                 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2366         prev &= ~bit;
2367
2368         bit = 1 << BTRFS_PENDING_COMMIT;
2369         if (prev & bit)
2370                 btrfs_debug(fs_info, "pending commit done");
2371         prev &= ~bit;
2372
2373         if (prev)
2374                 btrfs_warn(fs_info,
2375                         "unknown pending changes left 0x%lx, ignoring", prev);
2376 }