// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
        u64 ret = 0;
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                ret += qgroup->rsv.values[i];

        return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
        if (type == BTRFS_QGROUP_RSV_DATA)
                return "data";
        if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
                return "meta_pertrans";
        if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
                return "meta_prealloc";
        return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
                           struct btrfs_qgroup *qgroup, u64 num_bytes,
                           enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
        qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
                               struct btrfs_qgroup *qgroup, u64 num_bytes,
                               enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
        if (qgroup->rsv.values[type] >= num_bytes) {
                qgroup->rsv.values[type] -= num_bytes;
                return;
        }
#ifdef CONFIG_BTRFS_DEBUG
        WARN_RATELIMIT(1,
                "qgroup %llu %s reserved space underflow, have %llu to free %llu",
                qgroup->qgroupid, qgroup_rsv_type_str(type),
                qgroup->rsv.values[type], num_bytes);
#endif
        qgroup->rsv.values[type] = 0;
}
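
/*
 * Illustrative sketch (not taken from the original sources): reservations
 * are tracked per type, and releases are clamped at zero instead of being
 * allowed to underflow, e.g.:
 *
 *      qgroup_rsv_add(fs_info, qg, SZ_1M, BTRFS_QGROUP_RSV_DATA);
 *      qgroup_rsv_release(fs_info, qg, SZ_2M, BTRFS_QGROUP_RSV_DATA);
 *
 * leaves qg->rsv.values[BTRFS_QGROUP_RSV_DATA] at 0 and, under
 * CONFIG_BTRFS_DEBUG, fires the ratelimited underflow warning above.
 */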

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
                                     struct btrfs_qgroup *dest,
                                     struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
                                         struct btrfs_qgroup *dest,
                                         struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}
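
/*
 * Worked example (for illustration only): with a sequence snapshot
 * seq = 100 and old_refcnt initially below seq, a first
 * btrfs_qgroup_update_old_refcnt(qg, 100, 1) sets old_refcnt to 101 and a
 * second call bumps it to 102, so btrfs_qgroup_get_old_refcnt(qg, 100)
 * returns 2.  Using a monotonically increasing seq this way "resets" the
 * refcnts for each accounting run without touching every qgroup.
 */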

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
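
/*
 * For illustration: making qgroup 0/257 a member of qgroup 1/100 allocates
 * one btrfs_qgroup_list whose ->group points at 1/100 and whose ->member
 * points at 0/257; next_group is linked into the member's ->groups list and
 * next_member into the parent's ->members list, so the relation can be
 * walked from either side (see add_relation_rb() below).
 */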

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}
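
/*
 * Note: here and in find_qgroup_rb() the search descends to the left when
 * the node's qgroupid is smaller than the target, the mirror image of the
 * usual rb-tree idiom.  Since both functions use the same convention, the
 * tree stays internally consistent, just reverse-ordered.
 */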

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        btrfs_free_path(path);
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
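
/*
 * On-disk layout recap for the two passes above: the status item sits at
 * key (0, BTRFS_QGROUP_STATUS_KEY, 0), info and limit items live at
 * objectid 0 with the qgroupid in the key offset, and each member/parent
 * relation is stored twice, as (member, BTRFS_QGROUP_RELATION_KEY, parent)
 * plus the mirrored (parent, BTRFS_QGROUP_RELATION_KEY, member); pass 2
 * only consumes the member->parent direction (objectid <= offset).
 */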

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are in single-threaded paths.  For the third, quota_root has
 * already been set to NULL with qgroup_lock held, so it is safe to clean up
 * the in-memory structures without holding qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() when unmounting the filesystem
         * and when disabling quota, so set qgroup_ulist to NULL here to
         * avoid a double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
                                    u64 dst)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
                                    u64 dst)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * Called with qgroup_lock held.
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * Delete the leaves one by one, since the whole tree is
                 * going to be deleted anyway.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
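
/*
 * Note on the loop above: each btrfs_search_slot() positions the path at
 * the first remaining leaf, and btrfs_del_items() starting at slot 0 with
 * nr = nritems empties that whole leaf in one call, so the loop finishes
 * once the tree has shrunk to a single empty leaf.
 */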

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        struct btrfs_trans_handle *trans = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root)
                goto out;

        /*
         * 1 for the quota root item
         * 1 for the BTRFS_QGROUP_STATUS item
         *
         * We also need 2*n items for QGROUP_INFO/QGROUP_LIMIT items per
         * subvolume.  However, those are not currently reserved, since
         * doing so would be a lot of overkill.
         */
        trans = btrfs_start_transaction(tree_root, 2);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                trans = NULL;
                goto out;
        }

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        /*
         * Initially create the quota tree.
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                btrfs_abort_transaction(trans, ret);
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                goto out_free_path;
                        }

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                btrfs_abort_transaction(trans, ret);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out_free_path;
                }
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        ret = btrfs_commit_transaction(trans);
        trans = NULL;
        if (ret)
                goto out_free_path;

        /*
         * Set quota enabled flag after committing the transaction, to avoid
         * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
         * creation.
         */
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);

        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
                qgroup_rescan_zero_tracking(fs_info);
                fs_info->qgroup_rescan_running = true;
                btrfs_queue_work(fs_info->qgroup_rescan_workers,
                                 &fs_info->qgroup_rescan_work);
        }

out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                if (trans)
                        btrfs_end_transaction(trans);
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_trans_handle *trans = NULL;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;

        /*
         * 1 for the root item
         *
         * We should also reserve enough items for the quota tree deletion
         * in btrfs_clean_quota_tree, but this is not done.
         */
        trans = btrfs_start_transaction(fs_info->tree_root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto end_trans;
        }

        ret = btrfs_del_root(trans, &quota_root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto end_trans;
        }

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);

end_trans:
        ret = btrfs_end_transaction(trans);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
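
/*
 * Teardown ordering above matters: the enabled bit is cleared and any
 * running rescan is waited for before quota_root is detached under
 * qgroup_lock, which is what makes the lockless cleanup in
 * btrfs_free_qgroup_config() safe (see the comment above that function).
 */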

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the
 * parent, so excl/rfer just get added/removed.
 *
 * The same applies to the qgroup reservation space, which should also be
 * added to/removed from the parent; otherwise, when the child later
 * releases reservation space, the parent would underflow its reservation
 * (in the relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    struct btrfs_qgroup *src, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        u64 num_bytes = src->excl;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;

        if (sign > 0)
                qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
        else
                qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                qgroup_to_aux(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = unode_aux_to_qgroup(unode);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0)
                        qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
                else
                        qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        qgroup_to_aux(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}
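
/*
 * Worked example (illustrative): if src has excl == rfer == 1M and is being
 * attached to a new parent (sign = 1), the parent and every ancestor
 * reachable through the ->groups lists gain 1M of rfer and excl plus src's
 * per-type reservations; detaching (sign = -1) removes the same amounts.
 */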


/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, return >0 if a full rescan is needed
 * (the INCONSISTENT flag is then set).
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                              u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* First check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}
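
/*
 * Both directions of the relation item are inserted above; if the mirrored
 * insert fails, the first one is rolled back via del_qgroup_relation_item()
 * so the quota tree is not left with a dangling half of the pair.
 */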

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                                 u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* First check whether such a qgroup relation exists at all */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, src, dst);
        err = del_qgroup_relation_item(trans, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                              u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no children of this qgroup */
                if (!list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, qgroupid);
        if (ret && ret != -ENOENT)
                goto out;

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, qgroupid,
                                            list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /*
         * Sometimes we would want to clear the limit on this qgroup.
         * To meet this requirement, we treat -1 as a special value which
         * tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                           qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
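
/*
 * Usage note (illustrative): userspace clears a single limit by passing -1
 * (i.e. (u64)-1) in the corresponding field; for example, a
 * btrfs_qgroup_limit with flags = BTRFS_QGROUP_LIMIT_MAX_RFER and
 * max_rfer = -1 drops just the referenced-bytes limit and leaves the other
 * limits untouched.
 */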
1532
1533 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1534                                 struct btrfs_delayed_ref_root *delayed_refs,
1535                                 struct btrfs_qgroup_extent_record *record)
1536 {
1537         struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1538         struct rb_node *parent_node = NULL;
1539         struct btrfs_qgroup_extent_record *entry;
1540         u64 bytenr = record->bytenr;
1541
1542         lockdep_assert_held(&delayed_refs->lock);
1543         trace_btrfs_qgroup_trace_extent(fs_info, record);
1544
1545         while (*p) {
1546                 parent_node = *p;
1547                 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1548                                  node);
1549                 if (bytenr < entry->bytenr)
1550                         p = &(*p)->rb_left;
1551                 else if (bytenr > entry->bytenr)
1552                         p = &(*p)->rb_right;
1553                 else
1554                         return 1;
1555         }
1556
1557         rb_link_node(&record->node, parent_node, p);
1558         rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1559         return 0;
1560 }
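/*
 * Usage sketch (illustrative only, mirroring the caller below): the
 * delayed_refs lock must be held, and on a return of 1 the extent was
 * already recorded, so the caller still owns and must free @record:
 *
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)
 *		kfree(record);
 */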
1561
1562 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1563                                    struct btrfs_qgroup_extent_record *qrecord)
1564 {
1565         struct ulist *old_root;
1566         u64 bytenr = qrecord->bytenr;
1567         int ret;
1568
1569         ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1570         if (ret < 0) {
1571                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1572                 btrfs_warn(fs_info,
1573 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1574                         ret);
1575                 return 0;
1576         }
1577
1578         /*
1579          * Here we don't need to take the lock of
1580          * trans->transaction->delayed_refs, since the inserted qrecord won't
1581          * be deleted; only qrecord->node may be modified (by a new qrecord
1582          * insert).
1583          * So modifying qrecord->old_roots is safe here.
1584          */
1585         qrecord->old_roots = old_root;
1586         return 0;
1587 }
1588
1589 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1590                               u64 num_bytes, gfp_t gfp_flag)
1591 {
1592         struct btrfs_fs_info *fs_info = trans->fs_info;
1593         struct btrfs_qgroup_extent_record *record;
1594         struct btrfs_delayed_ref_root *delayed_refs;
1595         int ret;
1596
1597         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
1598             bytenr == 0 || num_bytes == 0)
1599                 return 0;
1600         record = kmalloc(sizeof(*record), gfp_flag);
1601         if (!record)
1602                 return -ENOMEM;
1603
1604         delayed_refs = &trans->transaction->delayed_refs;
1605         record->bytenr = bytenr;
1606         record->num_bytes = num_bytes;
1607         record->old_roots = NULL;
1608
1609         spin_lock(&delayed_refs->lock);
1610         ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1611         spin_unlock(&delayed_refs->lock);
1612         if (ret > 0) {
1613                 kfree(record);
1614                 return 0;
1615         }
1616         return btrfs_qgroup_trace_extent_post(fs_info, record);
1617 }
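/*
 * Example (hedged sketch, placeholder @bytenr/@num_bytes): a typical
 * caller traces an extent inside a transaction and lets the record be
 * accounted at commit time:
 *
 *	ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS);
 *	if (ret)
 *		return ret;
 */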
1618
1619 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1620                                   struct extent_buffer *eb)
1621 {
1622         struct btrfs_fs_info *fs_info = trans->fs_info;
1623         int nr = btrfs_header_nritems(eb);
1624         int i, extent_type, ret;
1625         struct btrfs_key key;
1626         struct btrfs_file_extent_item *fi;
1627         u64 bytenr, num_bytes;
1628
1629         /* We can be called directly from walk_up_proc() */
1630         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1631                 return 0;
1632
1633         for (i = 0; i < nr; i++) {
1634                 btrfs_item_key_to_cpu(eb, &key, i);
1635
1636                 if (key.type != BTRFS_EXTENT_DATA_KEY)
1637                         continue;
1638
1639                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1640                 /* Filter out non qgroup-accountable extents */
1641                 extent_type = btrfs_file_extent_type(eb, fi);
1642
1643                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1644                         continue;
1645
1646                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1647                 if (!bytenr)
1648                         continue;
1649
1650                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1651
1652                 ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
1653                                                 GFP_NOFS);
1654                 if (ret)
1655                         return ret;
1656         }
1657         cond_resched();
1658         return 0;
1659 }
1660
1661 /*
1662  * Walk up the tree from the bottom, freeing leaves and any interior
1663  * nodes which have had all slots visited. If a node (leaf or
1664  * interior) is freed, the node above it will have its slot
1665  * incremented. The root node will never be freed.
1666  *
1667  * At the end of this function, we should have a path which has all
1668  * slots incremented to the next position for a search. If we need to
1669  * read a new node it will be NULL and the node above it will have the
1670  * correct slot selected for a later read.
1671  *
1672  * If we increment the root node's slot counter past the number of
1673  * elements, 1 is returned to signal completion of the search.
1674  */
1675 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1676 {
1677         int level = 0;
1678         int nr, slot;
1679         struct extent_buffer *eb;
1680
1681         if (root_level == 0)
1682                 return 1;
1683
1684         while (level <= root_level) {
1685                 eb = path->nodes[level];
1686                 nr = btrfs_header_nritems(eb);
1687                 path->slots[level]++;
1688                 slot = path->slots[level];
1689                 if (slot >= nr || level == 0) {
1690                         /*
1691                          * Don't free the root - we will detect this
1692                          * condition after our loop and return a
1693                          * positive value for caller to stop walking the tree.
1694                          */
1695                         if (level != root_level) {
1696                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
1697                                 path->locks[level] = 0;
1698
1699                                 free_extent_buffer(eb);
1700                                 path->nodes[level] = NULL;
1701                                 path->slots[level] = 0;
1702                         }
1703                 } else {
1704                         /*
1705                          * We have a valid slot to walk back down
1706                          * from. Stop here so caller can process these
1707                          * new nodes.
1708                          */
1709                         break;
1710                 }
1711
1712                 level++;
1713         }
1714
1715         eb = path->nodes[root_level];
1716         if (path->slots[root_level] >= btrfs_header_nritems(eb))
1717                 return 1;
1718
1719         return 0;
1720 }
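/*
 * Worked example (assumed 2-level tree): root at level 1 with 3 items,
 * path->slots[1] == 0, and a fully visited leaf at level 0.  One call
 * unlocks and frees the leaf, sets path->nodes[0] = NULL, bumps
 * path->slots[1] to 1 and returns 0, so the caller walks back down into
 * the next child.  Once path->slots[1] reaches 3 (== nritems of the
 * root), the function returns 1 and the search is complete.
 */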
1721
1722 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
1723                                struct extent_buffer *root_eb,
1724                                u64 root_gen, int root_level)
1725 {
1726         struct btrfs_fs_info *fs_info = trans->fs_info;
1727         int ret = 0;
1728         int level;
1729         struct extent_buffer *eb = root_eb;
1730         struct btrfs_path *path = NULL;
1731
1732         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
1733         BUG_ON(root_eb == NULL);
1734
1735         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1736                 return 0;
1737
1738         if (!extent_buffer_uptodate(root_eb)) {
1739                 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
1740                 if (ret)
1741                         goto out;
1742         }
1743
1744         if (root_level == 0) {
1745                 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
1746                 goto out;
1747         }
1748
1749         path = btrfs_alloc_path();
1750         if (!path)
1751                 return -ENOMEM;
1752
1753         /*
1754          * Walk down the tree.  Missing extent blocks are filled in as
1755          * we go. Metadata is accounted every time we read a new
1756          * extent block.
1757          *
1758          * When we reach a leaf, we account for file extent items in it,
1759          * walk back up the tree (adjusting slot pointers as we go)
1760          * and restart the search process.
1761          */
1762         extent_buffer_get(root_eb); /* For path */
1763         path->nodes[root_level] = root_eb;
1764         path->slots[root_level] = 0;
1765         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
1766 walk_down:
1767         level = root_level;
1768         while (level >= 0) {
1769                 if (path->nodes[level] == NULL) {
1770                         struct btrfs_key first_key;
1771                         int parent_slot;
1772                         u64 child_gen;
1773                         u64 child_bytenr;
1774
1775                         /*
1776                          * We need to get child blockptr/gen from parent before
1777                          * we can read it.
1778                          */
1779                         eb = path->nodes[level + 1];
1780                         parent_slot = path->slots[level + 1];
1781                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1782                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1783                         btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1784
1785                         eb = read_tree_block(fs_info, child_bytenr, child_gen,
1786                                              level, &first_key);
1787                         if (IS_ERR(eb)) {
1788                                 ret = PTR_ERR(eb);
1789                                 goto out;
1790                         } else if (!extent_buffer_uptodate(eb)) {
1791                                 free_extent_buffer(eb);
1792                                 ret = -EIO;
1793                                 goto out;
1794                         }
1795
1796                         path->nodes[level] = eb;
1797                         path->slots[level] = 0;
1798
1799                         btrfs_tree_read_lock(eb);
1800                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1801                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
1802
1803                         ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
1804                                                         fs_info->nodesize,
1805                                                         GFP_NOFS);
1806                         if (ret)
1807                                 goto out;
1808                 }
1809
1810                 if (level == 0) {
1811                         ret = btrfs_qgroup_trace_leaf_items(trans,
1812                                                             path->nodes[level]);
1813                         if (ret)
1814                                 goto out;
1815
1816                         /* Nonzero return here means we completed our search */
1817                         ret = adjust_slots_upwards(path, root_level);
1818                         if (ret)
1819                                 break;
1820
1821                         /* Restart search with new slots */
1822                         goto walk_down;
1823                 }
1824
1825                 level--;
1826         }
1827
1828         ret = 0;
1829 out:
1830         btrfs_free_path(path);
1831
1832         return ret;
1833 }
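/*
 * Usage sketch (illustrative, placeholder @root_node): account a whole
 * subtree, e.g. before it is dropped or reallocated:
 *
 *	ret = btrfs_qgroup_trace_subtree(trans, root_node,
 *					 btrfs_header_generation(root_node),
 *					 btrfs_header_level(root_node));
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 */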
1834
1835 #define UPDATE_NEW      0
1836 #define UPDATE_OLD      1
1837 /*
1838  * Walk all of the roots that point to the bytenr and adjust their refcnts.
1839  */
1840 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1841                                 struct ulist *roots, struct ulist *tmp,
1842                                 struct ulist *qgroups, u64 seq, int update_old)
1843 {
1844         struct ulist_node *unode;
1845         struct ulist_iterator uiter;
1846         struct ulist_node *tmp_unode;
1847         struct ulist_iterator tmp_uiter;
1848         struct btrfs_qgroup *qg;
1849         int ret = 0;
1850
1851         if (!roots)
1852                 return 0;
1853         ULIST_ITER_INIT(&uiter);
1854         while ((unode = ulist_next(roots, &uiter))) {
1855                 qg = find_qgroup_rb(fs_info, unode->val);
1856                 if (!qg)
1857                         continue;
1858
1859                 ulist_reinit(tmp);
1860                 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
1861                                 GFP_ATOMIC);
1862                 if (ret < 0)
1863                         return ret;
1864                 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
1865                 if (ret < 0)
1866                         return ret;
1867                 ULIST_ITER_INIT(&tmp_uiter);
1868                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1869                         struct btrfs_qgroup_list *glist;
1870
1871                         qg = unode_aux_to_qgroup(tmp_unode);
1872                         if (update_old)
1873                                 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1874                         else
1875                                 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1876                         list_for_each_entry(glist, &qg->groups, next_group) {
1877                                 ret = ulist_add(qgroups, glist->group->qgroupid,
1878                                                 qgroup_to_aux(glist->group),
1879                                                 GFP_ATOMIC);
1880                                 if (ret < 0)
1881                                         return ret;
1882                                 ret = ulist_add(tmp, glist->group->qgroupid,
1883                                                 qgroup_to_aux(glist->group),
1884                                                 GFP_ATOMIC);
1885                                 if (ret < 0)
1886                                         return ret;
1887                         }
1888                 }
1889         }
1890         return 0;
1891 }
1892
1893 /*
1894  * Update qgroup rfer/excl counters.
1895  * Rfer update is easy, the code explains itself.
1896  *
1897  * Excl update is tricky; the update is split into 2 parts.
1898  * Part 1: Possible exclusive <-> sharing detection:
1899  *      |       A       |       !A      |
1900  *  -------------------------------------
1901  *  B   |       *       |       -       |
1902  *  -------------------------------------
1903  *  !B  |       +       |       **      |
1904  *  -------------------------------------
1905  *
1906  * Conditions:
1907  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1908  * !A:  cur_old_roots == nr_old_roots   (possible exclusive before)
1909  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1910  * !B:  cur_new_roots == nr_new_roots   (possible exclusive now)
1911  *
1912  * Results:
1913  * +: Possible sharing -> exclusive     -: Possible exclusive -> sharing
1914  * *: Definitely not changed.           **: Possible unchanged.
1915  *
1916  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
1917  *
1918  * To make the logic clear, we first use condition A and B to split
1919  * combination into 4 results.
1920  *
1921  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
1922  * those cases only one variant may be 0.
1923  *
1924  * Lastly, check result **. Since both variants may be 0 there, split it
1925  * again (2x2).
1926  * This time no other conditions need to be considered, so the code and
1927  * logic are easy to follow now.
1928  */
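/*
 * Worked example (assumed numbers): an extent of num_bytes was shared by
 * roots {257, 258} and root 258 drops its reference, so nr_old_roots = 2
 * and nr_new_roots = 1:
 *
 *   qgroup 257: cur_old_count = 1, cur_new_count = 1
 *	A (1 < 2) and !B (1 == 1) -> "+", sharing -> exclusive:
 *	excl += num_bytes, while rfer is unchanged (still referenced).
 *   qgroup 258: cur_old_count = 1, cur_new_count = 0
 *	A and B -> "*", excl unchanged (it was never exclusive here),
 *	but since cur_new_count == 0 the extent is no longer referenced,
 *	so rfer -= num_bytes.
 */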
1929 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1930                                   struct ulist *qgroups,
1931                                   u64 nr_old_roots,
1932                                   u64 nr_new_roots,
1933                                   u64 num_bytes, u64 seq)
1934 {
1935         struct ulist_node *unode;
1936         struct ulist_iterator uiter;
1937         struct btrfs_qgroup *qg;
1938         u64 cur_new_count, cur_old_count;
1939
1940         ULIST_ITER_INIT(&uiter);
1941         while ((unode = ulist_next(qgroups, &uiter))) {
1942                 bool dirty = false;
1943
1944                 qg = unode_aux_to_qgroup(unode);
1945                 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1946                 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1947
1948                 trace_qgroup_update_counters(fs_info, qg, cur_old_count,
1949                                              cur_new_count);
1950
1951                 /* Rfer update part */
1952                 if (cur_old_count == 0 && cur_new_count > 0) {
1953                         qg->rfer += num_bytes;
1954                         qg->rfer_cmpr += num_bytes;
1955                         dirty = true;
1956                 }
1957                 if (cur_old_count > 0 && cur_new_count == 0) {
1958                         qg->rfer -= num_bytes;
1959                         qg->rfer_cmpr -= num_bytes;
1960                         dirty = true;
1961                 }
1962
1963                 /* Excl update part */
1964                 /* Exclusive/none -> shared case */
1965                 if (cur_old_count == nr_old_roots &&
1966                     cur_new_count < nr_new_roots) {
1967                         /* Exclusive -> shared */
1968                         if (cur_old_count != 0) {
1969                                 qg->excl -= num_bytes;
1970                                 qg->excl_cmpr -= num_bytes;
1971                                 dirty = true;
1972                         }
1973                 }
1974
1975                 /* Shared -> exclusive/none case */
1976                 if (cur_old_count < nr_old_roots &&
1977                     cur_new_count == nr_new_roots) {
1978                         /* Shared->exclusive */
1979                         if (cur_new_count != 0) {
1980                                 qg->excl += num_bytes;
1981                                 qg->excl_cmpr += num_bytes;
1982                                 dirty = true;
1983                         }
1984                 }
1985
1986                 /* Exclusive/none -> exclusive/none case */
1987                 if (cur_old_count == nr_old_roots &&
1988                     cur_new_count == nr_new_roots) {
1989                         if (cur_old_count == 0) {
1990                                 /* None -> exclusive/none */
1991
1992                                 if (cur_new_count != 0) {
1993                                         /* None -> exclusive */
1994                                         qg->excl += num_bytes;
1995                                         qg->excl_cmpr += num_bytes;
1996                                         dirty = true;
1997                                 }
1998                                 /* None -> none, nothing changed */
1999                         } else {
2000                                 /* Exclusive -> exclusive/none */
2001
2002                                 if (cur_new_count == 0) {
2003                                         /* Exclusive -> none */
2004                                         qg->excl -= num_bytes;
2005                                         qg->excl_cmpr -= num_bytes;
2006                                         dirty = true;
2007                                 }
2008                                 /* Exclusive -> exclusive, nothing changed */
2009                         }
2010                 }
2011
2012                 if (dirty)
2013                         qgroup_dirty(fs_info, qg);
2014         }
2015         return 0;
2016 }
2017
2018 /*
2019  * Check if @roots is potentially a list of fs tree roots
2020  *
2021  * Return 0 if the ulist definitely does not contain fs/subvol tree roots
2022  * Return 1 if the list may contain fs/subvol tree roots (an empty list
2023  *          counts as well)
2024  */
2025 static int maybe_fs_roots(struct ulist *roots)
2026 {
2027         struct ulist_node *unode;
2028         struct ulist_iterator uiter;
2029
2030         /* Empty one, still possible for fs roots */
2031         if (!roots || roots->nnodes == 0)
2032                 return 1;
2033
2034         ULIST_ITER_INIT(&uiter);
2035         unode = ulist_next(roots, &uiter);
2036         if (!unode)
2037                 return 1;
2038
2039         /*
2040          * If it contains fs tree roots, then it must belong to fs/subvol
2041          * trees.
2042          * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2043          */
2044         return is_fstree(unode->val);
2045 }
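/*
 * Example (illustrative): a list whose first root is 257 (a subvolume
 * id, >= BTRFS_FIRST_FREE_OBJECTID) makes this return 1, while a list
 * starting with the extent tree objectid returns 0, as extent tree
 * blocks are never shared with fs/subvol trees.
 */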
2046
2047 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2048                                 u64 num_bytes, struct ulist *old_roots,
2049                                 struct ulist *new_roots)
2050 {
2051         struct btrfs_fs_info *fs_info = trans->fs_info;
2052         struct ulist *qgroups = NULL;
2053         struct ulist *tmp = NULL;
2054         u64 seq;
2055         u64 nr_new_roots = 0;
2056         u64 nr_old_roots = 0;
2057         int ret = 0;
2058
2059         /*
2060          * If quotas get disabled meanwhile, the resources need to be freed and
2061          * we can't just exit here.
2062          */
2063         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2064                 goto out_free;
2065
2066         if (new_roots) {
2067                 if (!maybe_fs_roots(new_roots))
2068                         goto out_free;
2069                 nr_new_roots = new_roots->nnodes;
2070         }
2071         if (old_roots) {
2072                 if (!maybe_fs_roots(old_roots))
2073                         goto out_free;
2074                 nr_old_roots = old_roots->nnodes;
2075         }
2076
2077         /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2078         if (nr_old_roots == 0 && nr_new_roots == 0)
2079                 goto out_free;
2080
2081         BUG_ON(!fs_info->quota_root);
2082
2083         trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2084                                         num_bytes, nr_old_roots, nr_new_roots);
2085
2086         qgroups = ulist_alloc(GFP_NOFS);
2087         if (!qgroups) {
2088                 ret = -ENOMEM;
2089                 goto out_free;
2090         }
2091         tmp = ulist_alloc(GFP_NOFS);
2092         if (!tmp) {
2093                 ret = -ENOMEM;
2094                 goto out_free;
2095         }
2096
2097         mutex_lock(&fs_info->qgroup_rescan_lock);
2098         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2099                 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2100                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2101                         ret = 0;
2102                         goto out_free;
2103                 }
2104         }
2105         mutex_unlock(&fs_info->qgroup_rescan_lock);
2106
2107         spin_lock(&fs_info->qgroup_lock);
2108         seq = fs_info->qgroup_seq;
2109
2110         /* Update old refcnts using old_roots */
2111         ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2112                                    UPDATE_OLD);
2113         if (ret < 0)
2114                 goto out;
2115
2116         /* Update new refcnts using new_roots */
2117         ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2118                                    UPDATE_NEW);
2119         if (ret < 0)
2120                 goto out;
2121
2122         qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2123                                num_bytes, seq);
2124
2125         /*
2126          * Bump qgroup_seq to avoid seq overlap
2127          */
2128         fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2129 out:
2130         spin_unlock(&fs_info->qgroup_lock);
2131 out_free:
2132         ulist_free(tmp);
2133         ulist_free(qgroups);
2134         ulist_free(old_roots);
2135         ulist_free(new_roots);
2136         return ret;
2137 }
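/*
 * Usage sketch (illustrative): this function always consumes @old_roots
 * and @new_roots, even on the early-exit paths above, so callers hand
 * over ownership and must not free the ulists again:
 *
 *	ret = btrfs_qgroup_account_extent(trans, bytenr, num_bytes,
 *					  old_roots, new_roots);
 *	old_roots = NULL;
 *	new_roots = NULL;
 */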
2138
2139 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2140 {
2141         struct btrfs_fs_info *fs_info = trans->fs_info;
2142         struct btrfs_qgroup_extent_record *record;
2143         struct btrfs_delayed_ref_root *delayed_refs;
2144         struct ulist *new_roots = NULL;
2145         struct rb_node *node;
2146         u64 qgroup_to_skip;
2147         int ret = 0;
2148
2149         delayed_refs = &trans->transaction->delayed_refs;
2150         qgroup_to_skip = delayed_refs->qgroup_to_skip;
2151         while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2152                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2153                                   node);
2154
2155                 trace_btrfs_qgroup_account_extents(fs_info, record);
2156
2157                 if (!ret) {
2158                         /*
2159                          * Old roots should be searched when inserting qgroup
2160                          * extent record
2161                          */
2162                         if (WARN_ON(!record->old_roots)) {
2163                                 /* Search commit root to find old_roots */
2164                                 ret = btrfs_find_all_roots(NULL, fs_info,
2165                                                 record->bytenr, 0,
2166                                                 &record->old_roots, false);
2167                                 if (ret < 0)
2168                                         goto cleanup;
2169                         }
2170
2171                         /*
2172                          * Use SEQ_LAST as time_seq to do special search, which
2173                          * doesn't lock tree or delayed_refs and search current
2174                          * root. It's safe inside commit_transaction().
2175                          */
2176                         ret = btrfs_find_all_roots(trans, fs_info,
2177                                 record->bytenr, SEQ_LAST, &new_roots, false);
2178                         if (ret < 0)
2179                                 goto cleanup;
2180                         if (qgroup_to_skip) {
2181                                 ulist_del(new_roots, qgroup_to_skip, 0);
2182                                 ulist_del(record->old_roots, qgroup_to_skip,
2183                                           0);
2184                         }
2185                         ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2186                                                           record->num_bytes,
2187                                                           record->old_roots,
2188                                                           new_roots);
2189                         record->old_roots = NULL;
2190                         new_roots = NULL;
2191                 }
2192 cleanup:
2193                 ulist_free(record->old_roots);
2194                 ulist_free(new_roots);
2195                 new_roots = NULL;
2196                 rb_erase(node, &delayed_refs->dirty_extent_root);
2197                 kfree(record);
2198
2199         }
2200         return ret;
2201 }
2202
2203 /*
2204  * Called from commit_transaction. Writes all changed qgroups to disk.
2205  */
2206 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2207 {
2208         struct btrfs_fs_info *fs_info = trans->fs_info;
2209         struct btrfs_root *quota_root = fs_info->quota_root;
2210         int ret = 0;
2211
2212         if (!quota_root)
2213                 return ret;
2214
2215         spin_lock(&fs_info->qgroup_lock);
2216         while (!list_empty(&fs_info->dirty_qgroups)) {
2217                 struct btrfs_qgroup *qgroup;
2218                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2219                                           struct btrfs_qgroup, dirty);
2220                 list_del_init(&qgroup->dirty);
2221                 spin_unlock(&fs_info->qgroup_lock);
2222                 ret = update_qgroup_info_item(trans, qgroup);
2223                 if (ret)
2224                         fs_info->qgroup_flags |=
2225                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2226                 ret = update_qgroup_limit_item(trans, qgroup);
2227                 if (ret)
2228                         fs_info->qgroup_flags |=
2229                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2230                 spin_lock(&fs_info->qgroup_lock);
2231         }
2232         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2233                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2234         else
2235                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2236         spin_unlock(&fs_info->qgroup_lock);
2237
2238         ret = update_qgroup_status_item(trans);
2239         if (ret)
2240                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2241
2242         return ret;
2243 }
2244
2245 /*
2246  * Copy the accounting information between qgroups. This is necessary
2247  * when a snapshot or a subvolume is created. Throwing an error will
2248  * cause a transaction abort so we take extra care here to only error
2249  * when a readonly fs is a reasonable outcome.
2250  */
2251 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2252                          u64 objectid, struct btrfs_qgroup_inherit *inherit)
2253 {
2254         int ret = 0;
2255         int i;
2256         u64 *i_qgroups;
2257         bool committing = false;
2258         struct btrfs_fs_info *fs_info = trans->fs_info;
2259         struct btrfs_root *quota_root;
2260         struct btrfs_qgroup *srcgroup;
2261         struct btrfs_qgroup *dstgroup;
2262         bool need_rescan = false;
2263         u32 level_size = 0;
2264         u64 nums;
2265
2266         /*
2267          * There are only two callers of this function.
2268          *
2269          * One in create_subvol() in the ioctl context, which needs to hold
2270          * the qgroup_ioctl_lock.
2271          *
2272  * The other one in create_pending_snapshot(), where no other qgroup
2273  * code can modify the fs, as they all need to either start a new trans
2274  * or hold a transaction handle, thus we don't need to hold
2275  * qgroup_ioctl_lock.
2276  * This avoids a long and complex lock chain and keeps lockdep happy.
2277          */
2278         spin_lock(&fs_info->trans_lock);
2279         if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2280                 committing = true;
2281         spin_unlock(&fs_info->trans_lock);
2282
2283         if (!committing)
2284                 mutex_lock(&fs_info->qgroup_ioctl_lock);
2285         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2286                 goto out;
2287
2288         quota_root = fs_info->quota_root;
2289         if (!quota_root) {
2290                 ret = -EINVAL;
2291                 goto out;
2292         }
2293
2294         if (inherit) {
2295                 i_qgroups = (u64 *)(inherit + 1);
2296                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2297                        2 * inherit->num_excl_copies;
2298                 for (i = 0; i < nums; ++i) {
2299                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2300
2301                         /*
2302                          * Zero out invalid groups so we can ignore
2303                          * them later.
2304                          */
2305                         if (!srcgroup ||
2306                             ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2307                                 *i_qgroups = 0ULL;
2308
2309                         ++i_qgroups;
2310                 }
2311         }
2312
2313         /*
2314          * create a tracking group for the subvol itself
2315          */
2316         ret = add_qgroup_item(trans, quota_root, objectid);
2317         if (ret)
2318                 goto out;
2319
2320         /*
2321          * add qgroup to all inherited groups
2322          */
2323         if (inherit) {
2324                 i_qgroups = (u64 *)(inherit + 1);
2325                 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2326                         if (*i_qgroups == 0)
2327                                 continue;
2328                         ret = add_qgroup_relation_item(trans, objectid,
2329                                                        *i_qgroups);
2330                         if (ret && ret != -EEXIST)
2331                                 goto out;
2332                         ret = add_qgroup_relation_item(trans, *i_qgroups,
2333                                                        objectid);
2334                         if (ret && ret != -EEXIST)
2335                                 goto out;
2336                 }
2337                 ret = 0;
2338         }
2339
2340
2341         spin_lock(&fs_info->qgroup_lock);
2342
2343         dstgroup = add_qgroup_rb(fs_info, objectid);
2344         if (IS_ERR(dstgroup)) {
2345                 ret = PTR_ERR(dstgroup);
2346                 goto unlock;
2347         }
2348
2349         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2350                 dstgroup->lim_flags = inherit->lim.flags;
2351                 dstgroup->max_rfer = inherit->lim.max_rfer;
2352                 dstgroup->max_excl = inherit->lim.max_excl;
2353                 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2354                 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2355
2356                 ret = update_qgroup_limit_item(trans, dstgroup);
2357                 if (ret) {
2358                         fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2359                         btrfs_info(fs_info,
2360                                    "unable to update quota limit for %llu",
2361                                    dstgroup->qgroupid);
2362                         goto unlock;
2363                 }
2364         }
2365
2366         if (srcid) {
2367                 srcgroup = find_qgroup_rb(fs_info, srcid);
2368                 if (!srcgroup)
2369                         goto unlock;
2370
2371                 /*
2372                  * We call inherit after we clone the root in order to make sure
2373                  * our counts don't go crazy, so at this point the only
2374                  * difference between the two roots should be the root node.
2375                  */
2376                 level_size = fs_info->nodesize;
2377                 dstgroup->rfer = srcgroup->rfer;
2378                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2379                 dstgroup->excl = level_size;
2380                 dstgroup->excl_cmpr = level_size;
2381                 srcgroup->excl = level_size;
2382                 srcgroup->excl_cmpr = level_size;
2383
2384                 /* inherit the limit info */
2385                 dstgroup->lim_flags = srcgroup->lim_flags;
2386                 dstgroup->max_rfer = srcgroup->max_rfer;
2387                 dstgroup->max_excl = srcgroup->max_excl;
2388                 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2389                 dstgroup->rsv_excl = srcgroup->rsv_excl;
2390
2391                 qgroup_dirty(fs_info, dstgroup);
2392                 qgroup_dirty(fs_info, srcgroup);
2393         }
2394
2395         if (!inherit)
2396                 goto unlock;
2397
2398         i_qgroups = (u64 *)(inherit + 1);
2399         for (i = 0; i < inherit->num_qgroups; ++i) {
2400                 if (*i_qgroups) {
2401                         ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2402                         if (ret)
2403                                 goto unlock;
2404                 }
2405                 ++i_qgroups;
2406
2407                 /*
2408                  * If we're doing a snapshot, and adding the snapshot to a new
2409                  * qgroup, the numbers are guaranteed to be incorrect.
2410                  */
2411                 if (srcid)
2412                         need_rescan = true;
2413         }
2414
2415         for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2416                 struct btrfs_qgroup *src;
2417                 struct btrfs_qgroup *dst;
2418
2419                 if (!i_qgroups[0] || !i_qgroups[1])
2420                         continue;
2421
2422                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2423                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2424
2425                 if (!src || !dst) {
2426                         ret = -EINVAL;
2427                         goto unlock;
2428                 }
2429
2430                 dst->rfer = src->rfer - level_size;
2431                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2432
2433                 /* Manually tweaking numbers certainly needs a rescan */
2434                 need_rescan = true;
2435         }
2436         for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2437                 struct btrfs_qgroup *src;
2438                 struct btrfs_qgroup *dst;
2439
2440                 if (!i_qgroups[0] || !i_qgroups[1])
2441                         continue;
2442
2443                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2444                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2445
2446                 if (!src || !dst) {
2447                         ret = -EINVAL;
2448                         goto unlock;
2449                 }
2450
2451                 dst->excl = src->excl + level_size;
2452                 dst->excl_cmpr = src->excl_cmpr + level_size;
2453                 need_rescan = true;
2454         }
2455
2456 unlock:
2457         spin_unlock(&fs_info->qgroup_lock);
2458 out:
2459         if (!committing)
2460                 mutex_unlock(&fs_info->qgroup_ioctl_lock);
2461         if (need_rescan)
2462                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2463         return ret;
2464 }
2465
2466 /*
2467  * Two thresholds for committing a transaction in advance.
2468  *
2469  * For RATIO, the threshold is 1/RATIO of the remaining limit.
2470  * For SIZE, the threshold is the given size in bytes.
2471  */
2472 #define QGROUP_FREE_RATIO               32
2473 #define QGROUP_FREE_SIZE                SZ_32M
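/*
 * Worked example (assumed numbers): with max_excl = 4GiB the ratio part
 * gives 4GiB / QGROUP_FREE_RATIO = 128MiB, so the effective threshold is
 * min(128MiB, QGROUP_FREE_SIZE) = 32MiB; once less than 32MiB of the
 * exclusive limit remains free, an early transaction commit is triggered.
 */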
2474 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
2475                                 const struct btrfs_qgroup *qg, u64 num_bytes)
2476 {
2477         u64 free;
2478         u64 threshold;
2479
2480         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2481             qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2482                 return false;
2483
2484         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2485             qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2486                 return false;
2487
2488         /*
2489          * Even if we passed the check, it's better to check whether the
2490          * meta_pertrans reservation is pushing us near the limit.
2491          * If there is too much pertrans reservation or it's near the limit,
2492          * try committing the transaction to free some, using transaction_kthread.
2493          */
2494         if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
2495                               BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
2496                 if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
2497                         free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
2498                         threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
2499                                           QGROUP_FREE_SIZE);
2500                 } else {
2501                         free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
2502                         threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
2503                                           QGROUP_FREE_SIZE);
2504                 }
2505
2506                 /*
2507                  * Use transaction_kthread to commit the transaction, so we no
2508                  * longer need to worry about nested transactions or lock context.
2509                  */
2510                 if (free < threshold)
2511                         btrfs_commit_transaction_locksafe(fs_info);
2512         }
2513
2514         return true;
2515 }
2516
2517 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2518                           enum btrfs_qgroup_rsv_type type)
2519 {
2520         struct btrfs_root *quota_root;
2521         struct btrfs_qgroup *qgroup;
2522         struct btrfs_fs_info *fs_info = root->fs_info;
2523         u64 ref_root = root->root_key.objectid;
2524         int ret = 0;
2525         struct ulist_node *unode;
2526         struct ulist_iterator uiter;
2527
2528         if (!is_fstree(ref_root))
2529                 return 0;
2530
2531         if (num_bytes == 0)
2532                 return 0;
2533
2534         if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2535             capable(CAP_SYS_RESOURCE))
2536                 enforce = false;
2537
2538         spin_lock(&fs_info->qgroup_lock);
2539         quota_root = fs_info->quota_root;
2540         if (!quota_root)
2541                 goto out;
2542
2543         qgroup = find_qgroup_rb(fs_info, ref_root);
2544         if (!qgroup)
2545                 goto out;
2546
2547         /*
2548          * In a first step, check all affected qgroups to see whether any
2549          * limits would be exceeded.
2550          */
2551         ulist_reinit(fs_info->qgroup_ulist);
2552         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2553                         qgroup_to_aux(qgroup), GFP_ATOMIC);
2554         if (ret < 0)
2555                 goto out;
2556         ULIST_ITER_INIT(&uiter);
2557         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2558                 struct btrfs_qgroup *qg;
2559                 struct btrfs_qgroup_list *glist;
2560
2561                 qg = unode_aux_to_qgroup(unode);
2562
2563                 if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
2564                         ret = -EDQUOT;
2565                         goto out;
2566                 }
2567
2568                 list_for_each_entry(glist, &qg->groups, next_group) {
2569                         ret = ulist_add(fs_info->qgroup_ulist,
2570                                         glist->group->qgroupid,
2571                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
2572                         if (ret < 0)
2573                                 goto out;
2574                 }
2575         }
2576         ret = 0;
2577         /*
2578          * No limits exceeded, now record the reservation into all qgroups.
2579          */
2580         ULIST_ITER_INIT(&uiter);
2581         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2582                 struct btrfs_qgroup *qg;
2583
2584                 qg = unode_aux_to_qgroup(unode);
2585
2586                 trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
2587                 qgroup_rsv_add(fs_info, qg, num_bytes, type);
2588         }
2589
2590 out:
2591         spin_unlock(&fs_info->qgroup_lock);
2592         return ret;
2593 }
2594
2595 /*
2596  * Free @num_bytes of reserved space with @type for a qgroup (normally a
2597  * level 0 qgroup).
2598  *
2599  * Will handle all higher level qgroups too.
2600  *
2601  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
2602  * This special case is only used for META_PERTRANS type.
2603  */
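/*
 * Example (hedged sketch): drop every pertrans byte still reserved by a
 * subvolume root, e.g. when a transaction is committed:
 *
 *	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
 *				  (u64)-1, BTRFS_QGROUP_RSV_META_PERTRANS);
 */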
2604 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2605                                u64 ref_root, u64 num_bytes,
2606                                enum btrfs_qgroup_rsv_type type)
2607 {
2608         struct btrfs_root *quota_root;
2609         struct btrfs_qgroup *qgroup;
2610         struct ulist_node *unode;
2611         struct ulist_iterator uiter;
2612         int ret = 0;
2613
2614         if (!is_fstree(ref_root))
2615                 return;
2616
2617         if (num_bytes == 0)
2618                 return;
2619
2620         if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
2621                 WARN(1, "%s: Invalid type to free", __func__);
2622                 return;
2623         }
2624         spin_lock(&fs_info->qgroup_lock);
2625
2626         quota_root = fs_info->quota_root;
2627         if (!quota_root)
2628                 goto out;
2629
2630         qgroup = find_qgroup_rb(fs_info, ref_root);
2631         if (!qgroup)
2632                 goto out;
2633
2634         if (num_bytes == (u64)-1)
2635                 /*
2636                  * We're freeing all pertrans rsv, get reserved value from
2637                  * level 0 qgroup as real num_bytes to free.
2638                  */
2639                 num_bytes = qgroup->rsv.values[type];
2640
2641         ulist_reinit(fs_info->qgroup_ulist);
2642         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2643                         qgroup_to_aux(qgroup), GFP_ATOMIC);
2644         if (ret < 0)
2645                 goto out;
2646         ULIST_ITER_INIT(&uiter);
2647         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2648                 struct btrfs_qgroup *qg;
2649                 struct btrfs_qgroup_list *glist;
2650
2651                 qg = unode_aux_to_qgroup(unode);
2652
2653                 trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type);
2654                 qgroup_rsv_release(fs_info, qg, num_bytes, type);
2655
2656                 list_for_each_entry(glist, &qg->groups, next_group) {
2657                         ret = ulist_add(fs_info->qgroup_ulist,
2658                                         glist->group->qgroupid,
2659                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
2660                         if (ret < 0)
2661                                 goto out;
2662                 }
2663         }
2664
2665 out:
2666         spin_unlock(&fs_info->qgroup_lock);
2667 }
2668
2669 /*
2670  * Check if the leaf is the last leaf, which means all node pointers
2671  * are at their last position.
2672  */
2673 static bool is_last_leaf(struct btrfs_path *path)
2674 {
2675         int i;
2676
2677         for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
2678                 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
2679                         return false;
2680         }
2681         return true;
2682 }
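/*
 * Example: in a 2-level tree whose root node has 10 items, this returns
 * true only when path->slots[1] == 9, i.e. the leaf just scanned hangs
 * off the root's last pointer.
 */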
2683
2684 /*
2685  * Returns < 0 on error, 0 when more leaves are to be scanned.
2686  * Returns 1 when done.
2687  */
2688 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
2689                               struct btrfs_path *path)
2690 {
2691         struct btrfs_fs_info *fs_info = trans->fs_info;
2692         struct btrfs_key found;
2693         struct extent_buffer *scratch_leaf = NULL;
2694         struct ulist *roots = NULL;
2695         u64 num_bytes;
2696         bool done;
2697         int slot;
2698         int ret;
2699
2700         mutex_lock(&fs_info->qgroup_rescan_lock);
2701         ret = btrfs_search_slot_for_read(fs_info->extent_root,
2702                                          &fs_info->qgroup_rescan_progress,
2703                                          path, 1, 0);
2704
2705         btrfs_debug(fs_info,
2706                 "current progress key (%llu %u %llu), search_slot ret %d",
2707                 fs_info->qgroup_rescan_progress.objectid,
2708                 fs_info->qgroup_rescan_progress.type,
2709                 fs_info->qgroup_rescan_progress.offset, ret);
2710
2711         if (ret) {
2712                 /*
2713                  * The rescan is about to end, we will not be scanning any
2714                  * further blocks. We cannot unset the RESCAN flag here, because
2715                  * we want to commit the transaction if everything went well.
2716                  * To make the live accounting work in this phase, we set our
2717                  * scan progress pointer such that every real extent objectid
2718                  * will be smaller.
2719                  */
2720                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2721                 btrfs_release_path(path);
2722                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2723                 return ret;
2724         }
2725         done = is_last_leaf(path);
2726
2727         btrfs_item_key_to_cpu(path->nodes[0], &found,
2728                               btrfs_header_nritems(path->nodes[0]) - 1);
2729         fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2730
2731         scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2732         if (!scratch_leaf) {
2733                 ret = -ENOMEM;
2734                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2735                 goto out;
2736         }
2737         extent_buffer_get(scratch_leaf);
2738         btrfs_tree_read_lock(scratch_leaf);
2739         btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2740         slot = path->slots[0];
2741         btrfs_release_path(path);
2742         mutex_unlock(&fs_info->qgroup_rescan_lock);
2743
2744         for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2745                 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2746                 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2747                     found.type != BTRFS_METADATA_ITEM_KEY)
2748                         continue;
2749                 if (found.type == BTRFS_METADATA_ITEM_KEY)
2750                         num_bytes = fs_info->nodesize;
2751                 else
2752                         num_bytes = found.offset;
2753
2754                 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2755                                            &roots, false);
2756                 if (ret < 0)
2757                         goto out;
2758                 /* For rescan, just pass old_roots as NULL */
2759                 ret = btrfs_qgroup_account_extent(trans, found.objectid,
2760                                                   num_bytes, NULL, roots);
2761                 if (ret < 0)
2762                         goto out;
2763         }
2764 out:
2765         if (scratch_leaf) {
2766                 btrfs_tree_read_unlock_blocking(scratch_leaf);
2767                 free_extent_buffer(scratch_leaf);
2768         }
2769
2770         if (done && !ret) {
2771                 ret = 1;
2772                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2773         }
2774         return ret;
2775 }
2776
2777 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
2778 {
2779         return btrfs_fs_closing(fs_info) ||
2780                 test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2781 }
2782
2783 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2784 {
2785         struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2786                                                      qgroup_rescan_work);
2787         struct btrfs_path *path;
2788         struct btrfs_trans_handle *trans = NULL;
2789         int err = -ENOMEM;
2790         int ret = 0;
2791         bool stopped = false;
2792
2793         path = btrfs_alloc_path();
2794         if (!path)
2795                 goto out;
2796         /*
2797          * Rescan should only search the commit root, and any later differences
2798          * will be recorded by the qgroup code.
2799          */
2800         path->search_commit_root = 1;
2801         path->skip_locking = 1;
2802
2803         err = 0;
2804         while (!err && !(stopped = rescan_should_stop(fs_info))) {
2805                 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2806                 if (IS_ERR(trans)) {
2807                         err = PTR_ERR(trans);
2808                         break;
2809                 }
2810                 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2811                         err = -EINTR;
2812                 } else {
2813                         err = qgroup_rescan_leaf(trans, path);
2814                 }
2815                 if (err > 0)
2816                         btrfs_commit_transaction(trans);
2817                 else
2818                         btrfs_end_transaction(trans);
2819         }
2820
2821 out:
2822         btrfs_free_path(path);
2823
2824         mutex_lock(&fs_info->qgroup_rescan_lock);
2825         if (err > 0 &&
2826             fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2827                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2828         } else if (err < 0) {
2829                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2830         }
2831         mutex_unlock(&fs_info->qgroup_rescan_lock);
2832
2833         /*
2834          * Only update the status item, since the previous part has already
2835          * updated the qgroup info.
2836          */
2837         trans = btrfs_start_transaction(fs_info->quota_root, 1);
2838         if (IS_ERR(trans)) {
2839                 err = PTR_ERR(trans);
2840                 trans = NULL;
2841                 btrfs_err(fs_info,
2842                           "failed to start transaction for status update: %d",
2843                           err);
2844         }
2845
2846         mutex_lock(&fs_info->qgroup_rescan_lock);
2847         if (!stopped)
2848                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2849         if (trans) {
2850                 ret = update_qgroup_status_item(trans);
2851                 if (ret < 0) {
2852                         err = ret;
2853                         btrfs_err(fs_info, "failed to update qgroup status: %d",
2854                                   err);
2855                 }
2856         }
2857         fs_info->qgroup_rescan_running = false;
2858         complete_all(&fs_info->qgroup_rescan_completion);
2859         mutex_unlock(&fs_info->qgroup_rescan_lock);
2860
2861         if (!trans)
2862                 return;
2863
2864         btrfs_end_transaction(trans);
2865
2866         if (stopped) {
2867                 btrfs_info(fs_info, "qgroup scan paused");
2868         } else if (err >= 0) {
2869                 btrfs_info(fs_info, "qgroup scan completed%s",
2870                         err > 0 ? " (inconsistency flag cleared)" : "");
2871         } else {
2872                 btrfs_err(fs_info, "qgroup scan failed with %d", err);
2873         }
2874 }
2875
2876 /*
2877  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2878  * memory required for the rescan context.
2879  */
2880 static int
2881 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2882                    int init_flags)
2883 {
2884         int ret = 0;
2885
2886         if (!init_flags) {
2887                 /* we're resuming qgroup rescan at mount time */
2888                 if (!(fs_info->qgroup_flags &
2889                       BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
2890                         btrfs_warn(fs_info,
2891                         "qgroup rescan init failed, qgroup rescan is not queued");
2892                         ret = -EINVAL;
2893                 } else if (!(fs_info->qgroup_flags &
2894                              BTRFS_QGROUP_STATUS_FLAG_ON)) {
2895                         btrfs_warn(fs_info,
2896                         "qgroup rescan init failed, qgroup is not enabled");
2897                         ret = -EINVAL;
2898                 }
2899
2900                 if (ret)
2901                         return ret;
2902         }
2903
2904         mutex_lock(&fs_info->qgroup_rescan_lock);
2905         spin_lock(&fs_info->qgroup_lock);
2906
2907         if (init_flags) {
2908                 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2909                         btrfs_warn(fs_info,
2910                                    "qgroup rescan is already in progress");
2911                         ret = -EINPROGRESS;
2912                 } else if (!(fs_info->qgroup_flags &
2913                              BTRFS_QGROUP_STATUS_FLAG_ON)) {
2914                         btrfs_warn(fs_info,
2915                         "qgroup rescan init failed, qgroup is not enabled");
2916                         ret = -EINVAL;
2917                 }
2918
2919                 if (ret) {
2920                         spin_unlock(&fs_info->qgroup_lock);
2921                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2922                         return ret;
2923                 }
2924                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2925         }
2926
2927         memset(&fs_info->qgroup_rescan_progress, 0,
2928                 sizeof(fs_info->qgroup_rescan_progress));
2929         fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2930         init_completion(&fs_info->qgroup_rescan_completion);
2931
2932         spin_unlock(&fs_info->qgroup_lock);
2933         mutex_unlock(&fs_info->qgroup_rescan_lock);
2934
2935         memset(&fs_info->qgroup_rescan_work, 0,
2936                sizeof(fs_info->qgroup_rescan_work));
2937         btrfs_init_work(&fs_info->qgroup_rescan_work,
2938                         btrfs_qgroup_rescan_helper,
2939                         btrfs_qgroup_rescan_worker, NULL, NULL);
2940         return 0;
2941 }
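
/*
 * Illustrative only, not additional API: a sketch of the two entry modes of
 * qgroup_rescan_init() above.
 *
 *	ret = qgroup_rescan_init(fs_info, 0, 1);
 *		Fresh rescan (e.g. from the rescan ioctl path): progress
 *		starts at objectid 0 and the call itself sets
 *		BTRFS_QGROUP_STATUS_FLAG_RESCAN.
 *
 *	ret = qgroup_rescan_init(fs_info, progress_objectid, 0);
 *		Mount-time resume: @progress_objectid comes from the on-disk
 *		status item and BTRFS_QGROUP_STATUS_FLAG_RESCAN must already
 *		be set.
 */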
2942
2943 static void
2944 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2945 {
2946         struct rb_node *n;
2947         struct btrfs_qgroup *qgroup;
2948
2949         spin_lock(&fs_info->qgroup_lock);
2950         /* clear all current qgroup tracking information */
2951         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2952                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2953                 qgroup->rfer = 0;
2954                 qgroup->rfer_cmpr = 0;
2955                 qgroup->excl = 0;
2956                 qgroup->excl_cmpr = 0;
2957                 qgroup_dirty(fs_info, qgroup);
2958         }
2959         spin_unlock(&fs_info->qgroup_lock);
2960 }
2961
2962 int
2963 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2964 {
2965         int ret = 0;
2966         struct btrfs_trans_handle *trans;
2967
2968         ret = qgroup_rescan_init(fs_info, 0, 1);
2969         if (ret)
2970                 return ret;
2971
        /*
         * We have set qgroup_rescan_progress to 0, which means no more
         * delayed refs will be accounted by btrfs_qgroup_account_ref.
         * However, a btrfs_qgroup_account_ref call may already be past its
         * call to btrfs_find_all_roots, in which case it would still do the
         * accounting.
         * To solve this, we commit the transaction, which ensures we run all
         * delayed refs, and only after that do we clear all tracking
         * information for a clean start.
         */
2982
2983         trans = btrfs_join_transaction(fs_info->fs_root);
2984         if (IS_ERR(trans)) {
2985                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2986                 return PTR_ERR(trans);
2987         }
2988         ret = btrfs_commit_transaction(trans);
2989         if (ret) {
2990                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2991                 return ret;
2992         }
2993
2994         qgroup_rescan_zero_tracking(fs_info);
2995
2996         mutex_lock(&fs_info->qgroup_rescan_lock);
2997         fs_info->qgroup_rescan_running = true;
2998         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2999                          &fs_info->qgroup_rescan_work);
3000         mutex_unlock(&fs_info->qgroup_rescan_lock);
3001
3002         return 0;
3003 }
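
/*
 * A minimal caller sketch (illustrative; in-tree the quota rescan ioctl
 * handler is the real user): start a rescan, then optionally wait for it
 * with the helper below.
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (ret == -EINPROGRESS)
 *		ret = 0;	// someone else already started one
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info, true);
 */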
3004
3005 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3006                                      bool interruptible)
3007 {
3008         int running;
3009         int ret = 0;
3010
3011         mutex_lock(&fs_info->qgroup_rescan_lock);
3012         spin_lock(&fs_info->qgroup_lock);
3013         running = fs_info->qgroup_rescan_running;
3014         spin_unlock(&fs_info->qgroup_lock);
3015         mutex_unlock(&fs_info->qgroup_rescan_lock);
3016
3017         if (!running)
3018                 return 0;
3019
3020         if (interruptible)
3021                 ret = wait_for_completion_interruptible(
3022                                         &fs_info->qgroup_rescan_completion);
3023         else
3024                 wait_for_completion(&fs_info->qgroup_rescan_completion);
3025
3026         return ret;
3027 }
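
/*
 * Illustrative contrast of the two wait modes above: ioctl-style callers
 * should pass interruptible=true so a signal can abort the wait (returning
 * -ERESTARTSYS), while teardown paths must pass false so they never return
 * before the rescan worker has completed.
 *
 *	ret = btrfs_qgroup_wait_for_completion(fs_info, true);	// user wait
 *	btrfs_qgroup_wait_for_completion(fs_info, false);	// teardown
 */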
3028
/*
 * This is called only from open_ctree(), where we are still single threaded;
 * the rescan lock is taken just to keep qgroup_rescan_running consistent with
 * the queued work.
 */
3033 void
3034 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3035 {
3036         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3037                 mutex_lock(&fs_info->qgroup_rescan_lock);
3038                 fs_info->qgroup_rescan_running = true;
3039                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
3040                                  &fs_info->qgroup_rescan_work);
3041                 mutex_unlock(&fs_info->qgroup_rescan_lock);
3042         }
3043 }
3044
/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for a successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation.
 *       If btrfs_qgroup_reserve_data() is called multiple times with the
 *       same @reserved, the caller must ensure that when an error happens
 *       it is OK to free *ALL* reserved space.
 */
3059 int btrfs_qgroup_reserve_data(struct inode *inode,
3060                         struct extent_changeset **reserved_ret, u64 start,
3061                         u64 len)
3062 {
3063         struct btrfs_root *root = BTRFS_I(inode)->root;
3064         struct ulist_node *unode;
3065         struct ulist_iterator uiter;
3066         struct extent_changeset *reserved;
3067         u64 orig_reserved;
3068         u64 to_reserve;
3069         int ret;
3070
3071         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3072             !is_fstree(root->objectid) || len == 0)
3073                 return 0;
3074
3075         /* @reserved parameter is mandatory for qgroup */
3076         if (WARN_ON(!reserved_ret))
3077                 return -EINVAL;
3078         if (!*reserved_ret) {
3079                 *reserved_ret = extent_changeset_alloc();
3080                 if (!*reserved_ret)
3081                         return -ENOMEM;
3082         }
3083         reserved = *reserved_ret;
3084         /* Record already reserved space */
3085         orig_reserved = reserved->bytes_changed;
        ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
                        start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
3088
3089         /* Newly reserved space */
3090         to_reserve = reserved->bytes_changed - orig_reserved;
3091         trace_btrfs_qgroup_reserve_data(inode, start, len,
3092                                         to_reserve, QGROUP_RESERVE);
3093         if (ret < 0)
3094                 goto cleanup;
3095         ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3096         if (ret < 0)
3097                 goto cleanup;
3098
3099         return ret;
3100
3101 cleanup:
        /* Clean up *ALL* ranges reserved so far */
3103         ULIST_ITER_INIT(&uiter);
3104         while ((unode = ulist_next(&reserved->range_changed, &uiter)))
3105                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
3106                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
        /* Also free the data bytes reserved by previous calls */
3108         btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
3109                                   orig_reserved, BTRFS_QGROUP_RSV_DATA);
3110         extent_changeset_release(reserved);
3111         return ret;
3112 }
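
/*
 * A minimal data reservation lifecycle sketch (illustrative; the
 * "write_reached_disk" condition is a stand-in for the real ordered-IO
 * state, not an in-tree symbol):
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		goto out;	// nothing is left reserved on error
 *	// ... dirty the pages, run the write ...
 *	if (write_reached_disk)
 *		btrfs_qgroup_release_data(inode, start, len);
 *	else
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *	extent_changeset_free(reserved);
 */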
3113
/* Free ranges specified by @reserved, normally in the error path */
3115 static int qgroup_free_reserved_data(struct inode *inode,
3116                         struct extent_changeset *reserved, u64 start, u64 len)
3117 {
3118         struct btrfs_root *root = BTRFS_I(inode)->root;
3119         struct ulist_node *unode;
3120         struct ulist_iterator uiter;
3121         struct extent_changeset changeset;
3122         int freed = 0;
3123         int ret;
3124
3125         extent_changeset_init(&changeset);
3126         len = round_up(start + len, root->fs_info->sectorsize);
3127         start = round_down(start, root->fs_info->sectorsize);
3128
3129         ULIST_ITER_INIT(&uiter);
3130         while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3131                 u64 range_start = unode->val;
3132                 /* unode->aux is the inclusive end */
3133                 u64 range_len = unode->aux - range_start + 1;
3134                 u64 free_start;
3135                 u64 free_len;
3136
3137                 extent_changeset_release(&changeset);
3138
                /* Only free ranges within [start, start + len) */
3140                 if (range_start >= start + len ||
3141                     range_start + range_len <= start)
3142                         continue;
3143                 free_start = max(range_start, start);
3144                 free_len = min(start + len, range_start + range_len) -
3145                            free_start;
                /*
                 * TODO: Also modify reserved->ranges_reserved to reflect
                 * the modification.
                 *
                 * However, as long as we free qgroup reserved space according
                 * to EXTENT_QGROUP_RESERVED, we won't double free, so there
                 * is no need to rush.
                 */
3154                 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
3155                                 free_start, free_start + free_len - 1,
3156                                 EXTENT_QGROUP_RESERVED, &changeset);
3157                 if (ret < 0)
3158                         goto out;
3159                 freed += changeset.bytes_changed;
3160         }
3161         btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed,
3162                                   BTRFS_QGROUP_RSV_DATA);
3163         ret = freed;
3164 out:
3165         extent_changeset_release(&changeset);
3166         return ret;
3167 }
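
/*
 * Worked example of the clamping above (illustrative numbers, 4K sectors):
 * with reserved ranges [0, 8K) and [32K, 40K) and a free request of
 * start=0, len=16K:
 *
 *	[0, 8K):    free_start = max(0, 0) = 0
 *	            free_len   = min(16K, 8K) - 0 = 8K	-> 8K freed
 *	[32K, 40K): range_start (32K) >= start + len (16K)	-> skipped
 *
 * so only the part that really lies inside the requested window is cleared
 * from the io_tree and counted in the return value.
 */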
3168
3169 static int __btrfs_qgroup_release_data(struct inode *inode,
3170                         struct extent_changeset *reserved, u64 start, u64 len,
3171                         int free)
3172 {
3173         struct extent_changeset changeset;
3174         int trace_op = QGROUP_RELEASE;
3175         int ret;
3176
3177         if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
3178                       &BTRFS_I(inode)->root->fs_info->flags))
3179                 return 0;
3180
        /* In the release case, we shouldn't have @reserved */
3182         WARN_ON(!free && reserved);
3183         if (free && reserved)
3184                 return qgroup_free_reserved_data(inode, reserved, start, len);
3185         extent_changeset_init(&changeset);
        ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
                        start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
3188         if (ret < 0)
3189                 goto out;
3190
3191         if (free)
3192                 trace_op = QGROUP_FREE;
3193         trace_btrfs_qgroup_release_data(inode, start, len,
3194                                         changeset.bytes_changed, trace_op);
3195         if (free)
3196                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3197                                 BTRFS_I(inode)->root->objectid,
3198                                 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3199         ret = changeset.bytes_changed;
3200 out:
3201         extent_changeset_release(&changeset);
3202         return ret;
3203 }
3204
/*
 * Free a reserved space range from io_tree and related qgroups.
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
3217 int btrfs_qgroup_free_data(struct inode *inode,
3218                         struct extent_changeset *reserved, u64 start, u64 len)
3219 {
3220         return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3221 }
3222
/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, the reserved space shouldn't be freed from the
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further writes to
 * be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
3238 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
3239 {
3240         return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3241 }
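
/*
 * Quick reference for the two wrappers above (both clear
 * EXTENT_QGROUP_RESERVED from the io_tree; they differ in qgroup handling):
 *
 *	btrfs_qgroup_free_data():    data never reached disk, so the bytes
 *				     are returned to the qgroup data rsv.
 *	btrfs_qgroup_release_data(): data reached disk, so the bytes stay
 *				     reserved and are accounted at
 *				     transaction commit instead.
 */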
3242
3243 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3244                               enum btrfs_qgroup_rsv_type type)
3245 {
3246         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3247             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3248                 return;
3249         if (num_bytes == 0)
3250                 return;
3251
3252         spin_lock(&root->qgroup_meta_rsv_lock);
3253         if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3254                 root->qgroup_meta_rsv_prealloc += num_bytes;
3255         else
3256                 root->qgroup_meta_rsv_pertrans += num_bytes;
3257         spin_unlock(&root->qgroup_meta_rsv_lock);
3258 }
3259
3260 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3261                              enum btrfs_qgroup_rsv_type type)
3262 {
3263         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3264             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3265                 return 0;
3266         if (num_bytes == 0)
3267                 return 0;
3268
3269         spin_lock(&root->qgroup_meta_rsv_lock);
3270         if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3271                 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3272                                   num_bytes);
3273                 root->qgroup_meta_rsv_prealloc -= num_bytes;
3274         } else {
3275                 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3276                                   num_bytes);
3277                 root->qgroup_meta_rsv_pertrans -= num_bytes;
3278         }
3279         spin_unlock(&root->qgroup_meta_rsv_lock);
3280         return num_bytes;
3281 }
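
/*
 * Illustrative numbers for the clamping above: if the root only tracks 8K
 * of prealloc rsv but a caller asks to subtract 16K (e.g. after a quota
 * disable/enable cycle), the helper hands back 8K so we never free more
 * than was really reserved:
 *
 *	// root->qgroup_meta_rsv_prealloc == 8K
 *	freed = sub_root_meta_rsv(root, SZ_16K, BTRFS_QGROUP_RSV_META_PREALLOC);
 *	// freed == 8K, root->qgroup_meta_rsv_prealloc == 0
 */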
3282
3283 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3284                                 enum btrfs_qgroup_rsv_type type, bool enforce)
3285 {
3286         struct btrfs_fs_info *fs_info = root->fs_info;
3287         int ret;
3288
3289         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3290             !is_fstree(root->objectid) || num_bytes == 0)
3291                 return 0;
3292
3293         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3294         trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
3295         ret = qgroup_reserve(root, num_bytes, enforce, type);
3296         if (ret < 0)
3297                 return ret;
        /*
         * Record what we have reserved into root.
         *
         * This avoids an underflow across a quota disabled->enabled cycle:
         * in that case we may try to free space we haven't reserved (since
         * quota was disabled at reservation time), so record what we
         * reserved into the root and ensure a later release won't underflow
         * this number.
         */
3306         add_root_meta_rsv(root, num_bytes, type);
3307         return ret;
3308 }
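
/*
 * Callers normally use the typed wrappers from qgroup.h instead of calling
 * this directly; a hedged sketch of a prealloc reservation sized in whole
 * nodes (the BUG_ON above requires nodesize alignment):
 *
 *	num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr_items);
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
 *	if (ret < 0)		// may be -EDQUOT when enforcing limits
 *		return ret;
 */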
3309
3310 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3311 {
3312         struct btrfs_fs_info *fs_info = root->fs_info;
3313
3314         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3315             !is_fstree(root->objectid))
3316                 return;
3317
3318         /* TODO: Update trace point to handle such free */
3319         trace_qgroup_meta_free_all_pertrans(root);
3320         /* Special value -1 means to free all reserved space */
3321         btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1,
3322                                   BTRFS_QGROUP_RSV_META_PERTRANS);
3323 }
3324
3325 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3326                               enum btrfs_qgroup_rsv_type type)
3327 {
3328         struct btrfs_fs_info *fs_info = root->fs_info;
3329
3330         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3331             !is_fstree(root->objectid))
3332                 return;
3333
        /*
         * Reservation for META_PREALLOC can happen before quota is enabled,
         * which can lead to underflow.
         * Ensure we only free what we have actually reserved.
         */
3339         num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3340         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3341         trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
3342         btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
3343 }
3344
3345 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3346                                 int num_bytes)
3347 {
3348         struct btrfs_root *quota_root = fs_info->quota_root;
3349         struct btrfs_qgroup *qgroup;
3350         struct ulist_node *unode;
3351         struct ulist_iterator uiter;
3352         int ret = 0;
3353
3354         if (num_bytes == 0)
3355                 return;
3356         if (!quota_root)
3357                 return;
3358
3359         spin_lock(&fs_info->qgroup_lock);
3360         qgroup = find_qgroup_rb(fs_info, ref_root);
3361         if (!qgroup)
3362                 goto out;
3363         ulist_reinit(fs_info->qgroup_ulist);
3364         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3365                        qgroup_to_aux(qgroup), GFP_ATOMIC);
3366         if (ret < 0)
3367                 goto out;
3368         ULIST_ITER_INIT(&uiter);
3369         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3370                 struct btrfs_qgroup *qg;
3371                 struct btrfs_qgroup_list *glist;
3372
3373                 qg = unode_aux_to_qgroup(unode);
3374
3375                 qgroup_rsv_release(fs_info, qg, num_bytes,
3376                                 BTRFS_QGROUP_RSV_META_PREALLOC);
3377                 qgroup_rsv_add(fs_info, qg, num_bytes,
3378                                 BTRFS_QGROUP_RSV_META_PERTRANS);
3379                 list_for_each_entry(glist, &qg->groups, next_group) {
3380                         ret = ulist_add(fs_info->qgroup_ulist,
3381                                         glist->group->qgroupid,
3382                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
3383                         if (ret < 0)
3384                                 goto out;
3385                 }
3386         }
3387 out:
3388         spin_unlock(&fs_info->qgroup_lock);
3389 }
3390
3391 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3392 {
3393         struct btrfs_fs_info *fs_info = root->fs_info;
3394
3395         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3396             !is_fstree(root->objectid))
3397                 return;
3398         /* Same as btrfs_qgroup_free_meta_prealloc() */
3399         num_bytes = sub_root_meta_rsv(root, num_bytes,
3400                                       BTRFS_QGROUP_RSV_META_PREALLOC);
3401         trace_qgroup_meta_convert(root, num_bytes);
3402         qgroup_convert_meta(fs_info, root->objectid, num_bytes);
3403 }
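
/*
 * Illustrative lifecycle of the conversion above: prealloc rsv taken before
 * a transaction becomes pertrans rsv once the metadata is actually used
 * inside the transaction, and the pertrans part is then dropped in bulk at
 * commit time:
 *
 *	btrfs_qgroup_reserve_meta_prealloc(root, bytes, true);
 *	// ... join transaction, COW the tree blocks ...
 *	btrfs_qgroup_convert_reserved_meta(root, bytes);
 *	// ... on transaction commit ...
 *	btrfs_qgroup_free_meta_all_pertrans(root);
 */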
3404
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
3409 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
3410 {
3411         struct extent_changeset changeset;
3412         struct ulist_node *unode;
3413         struct ulist_iterator iter;
3414         int ret;
3415
3416         extent_changeset_init(&changeset);
3417         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3418                         EXTENT_QGROUP_RESERVED, &changeset);
3419
3420         WARN_ON(ret < 0);
3421         if (WARN_ON(changeset.bytes_changed)) {
3422                 ULIST_ITER_INIT(&iter);
3423                 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3424                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
3425                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3426                                 inode->i_ino, unode->val, unode->aux);
3427                 }
3428                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3429                                 BTRFS_I(inode)->root->objectid,
3430                                 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3431
3432         }
3433         extent_changeset_release(&changeset);
3434 }
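
/*
 * Illustrative call site (a sketch; the in-tree caller lives in the inode
 * teardown path): running the check at eviction time means any
 * EXTENT_QGROUP_RESERVED bits still set are a missed free/release pairing
 * somewhere above.
 *
 *	btrfs_qgroup_check_reserved_leak(inode);
 */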