1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/list_sort.h>
6 #include "block-group.h"
7 #include "space-info.h"
9 #include "free-space-cache.h"
10 #include "free-space-tree.h"
12 #include "transaction.h"
13 #include "ref-verify.h"
16 #include "delalloc-space.h"
22 * Return target flags in extended format or 0 if restripe for this chunk_type is not in progress.
25 * Should be called with balance_lock held
27 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
29 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
35 if (flags & BTRFS_BLOCK_GROUP_DATA &&
36 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
37 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
38 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
39 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
40 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
41 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
42 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
43 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
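/*
 * Illustrative sketch (not part of the original source): if a balance is
 * converting data chunks to RAID1, bctl->data.flags has
 * BTRFS_BALANCE_ARGS_CONVERT set and bctl->data.target holds the extended
 * RAID1 bit, so a caller would see:
 *
 *	spin_lock(&fs_info->balance_lock);
 *	target = get_restripe_target(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *	spin_unlock(&fs_info->balance_lock);
 *
 * with target == BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1.
 */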
50 * @flags: available profiles in extended format (see ctree.h)
52 * Return reduced profile in chunk format. If profile changing is in progress
53 * (either running or paused), it picks the target profile (if it's already
54 * available), otherwise falls back to plain reducing.
56 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
58 u64 num_devices = fs_info->fs_devices->rw_devices;
64 * See if restripe for this chunk_type is in progress; if so, try to
65 * reduce to the target profile.
67 spin_lock(&fs_info->balance_lock);
68 target = get_restripe_target(fs_info, flags);
70 spin_unlock(&fs_info->balance_lock);
71 return extended_to_chunk(target);
73 spin_unlock(&fs_info->balance_lock);
75 /* First, mask out the RAID levels which aren't possible */
76 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
77 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
78 allowed |= btrfs_raid_array[raid_type].bg_flag;
82 /* Select the highest-redundancy RAID level. */
83 if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
84 allowed = BTRFS_BLOCK_GROUP_RAID1C4;
85 else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
86 allowed = BTRFS_BLOCK_GROUP_RAID6;
87 else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
88 allowed = BTRFS_BLOCK_GROUP_RAID1C3;
89 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
90 allowed = BTRFS_BLOCK_GROUP_RAID5;
91 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
92 allowed = BTRFS_BLOCK_GROUP_RAID10;
93 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
94 allowed = BTRFS_BLOCK_GROUP_RAID1;
95 else if (allowed & BTRFS_BLOCK_GROUP_DUP)
96 allowed = BTRFS_BLOCK_GROUP_DUP;
97 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
98 allowed = BTRFS_BLOCK_GROUP_RAID0;
100 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
102 return extended_to_chunk(flags | allowed);
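/*
 * Worked example (illustrative only): suppose that after the masking loop
 * the allowed mask still contains RAID1 | RAID0. The if/else ladder above
 * keeps only the highest-redundancy bit, RAID1, so for a data request the
 * function returns extended_to_chunk(BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1).
 */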
105 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
112 seq = read_seqbegin(&fs_info->profiles_lock);
114 if (flags & BTRFS_BLOCK_GROUP_DATA)
115 flags |= fs_info->avail_data_alloc_bits;
116 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
117 flags |= fs_info->avail_system_alloc_bits;
118 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
119 flags |= fs_info->avail_metadata_alloc_bits;
120 } while (read_seqretry(&fs_info->profiles_lock, seq));
122 return btrfs_reduce_alloc_profile(fs_info, flags);
125 void btrfs_get_block_group(struct btrfs_block_group *cache)
127 refcount_inc(&cache->refs);
130 void btrfs_put_block_group(struct btrfs_block_group *cache)
132 if (refcount_dec_and_test(&cache->refs)) {
133 WARN_ON(cache->pinned > 0);
135 * If there was a failure to cleanup a log tree, very likely due
136 * to an IO failure on a writeback attempt of one or more of its
137 * extent buffers, we could not do proper (and cheap) unaccounting
138 * of their reserved space, so don't warn on reserved > 0 in that case.
141 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
142 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
143 WARN_ON(cache->reserved > 0);
146 * A block_group shouldn't be on the discard_list anymore.
147 * Remove the block_group from the discard_list to prevent us
148 * from causing a panic due to NULL pointer dereference.
150 if (WARN_ON(!list_empty(&cache->discard_list)))
151 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
155 * If not empty, someone is still holding the mutex of
156 * full_stripe_lock, which can only be released by the caller,
157 * and that will definitely cause a use-after-free when the
158 * caller tries to release the full stripe lock.
160 * There is no better way to resolve this, so only warn.
162 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
163 kfree(cache->free_space_ctl);
164 kfree(cache->physical_map);
170 * This adds the block group to the fs_info rb tree for the block group cache
172 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
173 struct btrfs_block_group *block_group)
176 struct rb_node *parent = NULL;
177 struct btrfs_block_group *cache;
178 bool leftmost = true;
180 ASSERT(block_group->length != 0);
182 write_lock(&info->block_group_cache_lock);
183 p = &info->block_group_cache_tree.rb_root.rb_node;
187 cache = rb_entry(parent, struct btrfs_block_group, cache_node);
188 if (block_group->start < cache->start) {
190 } else if (block_group->start > cache->start) {
194 write_unlock(&info->block_group_cache_lock);
199 rb_link_node(&block_group->cache_node, parent, p);
200 rb_insert_color_cached(&block_group->cache_node,
201 &info->block_group_cache_tree, leftmost);
203 write_unlock(&info->block_group_cache_lock);
209 * This will return the block group at or after bytenr if contains is 0, else
210 * it will return the block group that contains the bytenr
212 static struct btrfs_block_group *block_group_cache_tree_search(
213 struct btrfs_fs_info *info, u64 bytenr, int contains)
215 struct btrfs_block_group *cache, *ret = NULL;
219 read_lock(&info->block_group_cache_lock);
220 n = info->block_group_cache_tree.rb_root.rb_node;
223 cache = rb_entry(n, struct btrfs_block_group, cache_node);
224 end = cache->start + cache->length - 1;
225 start = cache->start;
227 if (bytenr < start) {
228 if (!contains && (!ret || start < ret->start))
231 } else if (bytenr > start) {
232 if (contains && bytenr <= end) {
243 btrfs_get_block_group(ret);
244 read_unlock(&info->block_group_cache_lock);
250 * Return the block group that starts at or after bytenr
252 struct btrfs_block_group *btrfs_lookup_first_block_group(
253 struct btrfs_fs_info *info, u64 bytenr)
255 return block_group_cache_tree_search(info, bytenr, 0);
259 * Return the block group that contains the given bytenr
261 struct btrfs_block_group *btrfs_lookup_block_group(
262 struct btrfs_fs_info *info, u64 bytenr)
264 return block_group_cache_tree_search(info, bytenr, 1);
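/*
 * Usage sketch (hypothetical caller): the two lookup helpers above differ
 * only in the @contains argument passed to block_group_cache_tree_search().
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *
 * returns the block group whose [start, start + length) range contains
 * @bytenr, while
 *
 *	bg = btrfs_lookup_first_block_group(fs_info, bytenr);
 *
 * returns the first block group starting at or after @bytenr. Both take a
 * reference that must be dropped with btrfs_put_block_group().
 */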
267 struct btrfs_block_group *btrfs_next_block_group(
268 struct btrfs_block_group *cache)
270 struct btrfs_fs_info *fs_info = cache->fs_info;
271 struct rb_node *node;
273 read_lock(&fs_info->block_group_cache_lock);
275 /* If our block group was removed, we need a full search. */
276 if (RB_EMPTY_NODE(&cache->cache_node)) {
277 const u64 next_bytenr = cache->start + cache->length;
279 read_unlock(&fs_info->block_group_cache_lock);
280 btrfs_put_block_group(cache);
281 return btrfs_lookup_first_block_group(fs_info, next_bytenr);
283 node = rb_next(&cache->cache_node);
284 btrfs_put_block_group(cache);
286 cache = rb_entry(node, struct btrfs_block_group, cache_node);
287 btrfs_get_block_group(cache);
290 read_unlock(&fs_info->block_group_cache_lock);
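/*
 * Iteration sketch (illustrative, modeled on callers such as the fitrim
 * path): btrfs_next_block_group() drops the reference on @cache and takes
 * one on the next group (or returns NULL at the end), so all block groups
 * can be walked without leaking references:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg; on an early break, call btrfs_put_block_group(bg) ...
 *	}
 */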
295 * Check if we can do a NOCOW write for a given extent.
297 * @fs_info: The filesystem information object.
298 * @bytenr: Logical start address of the extent.
300 * Check if we can do a NOCOW write for the given extent, and increment the
301 * number of NOCOW writers in the block group that contains the extent, as long
302 * as the block group exists and it's currently not in read-only mode.
304 * Returns: A non-NULL block group pointer if we can do a NOCOW write; the caller
305 * is responsible for calling btrfs_dec_nocow_writers() later.
307 * Or NULL if we cannot do a NOCOW write.
309 struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
312 struct btrfs_block_group *bg;
313 bool can_nocow = true;
315 bg = btrfs_lookup_block_group(fs_info, bytenr);
319 spin_lock(&bg->lock);
323 atomic_inc(&bg->nocow_writers);
324 spin_unlock(&bg->lock);
327 btrfs_put_block_group(bg);
331 /* No put on block group, done by btrfs_dec_nocow_writers(). */
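/*
 * Pairing sketch (hypothetical NOCOW write path): a caller brackets the
 * creation of its ordered extent with the writer count:
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (!bg)
 *		fall back to a COW write;
 *	... create the ordered extent ...
 *	btrfs_dec_nocow_writers(bg);
 */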
336 * Decrement the number of NOCOW writers in a block group.
338 * @bg: The block group.
340 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
341 * and on the block group returned by that call. Typically this is called after
342 * creating an ordered extent for a NOCOW write, to prevent races with scrub and relocation.
345 * After this call, the caller should not use the block group anymore. If it wants
346 * to use it, then it should get a reference on it before calling this function.
348 void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
350 if (atomic_dec_and_test(&bg->nocow_writers))
351 wake_up_var(&bg->nocow_writers);
353 /* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
354 btrfs_put_block_group(bg);
357 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
359 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
362 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
365 struct btrfs_block_group *bg;
367 bg = btrfs_lookup_block_group(fs_info, start);
369 if (atomic_dec_and_test(&bg->reservations))
370 wake_up_var(&bg->reservations);
371 btrfs_put_block_group(bg);
374 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
376 struct btrfs_space_info *space_info = bg->space_info;
380 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
384 * Our block group is read only but before we set it to read only,
385 * some task might have allocated an extent from it already, but it
386 * has not yet created a respective ordered extent (and added it to a
387 * root's list of ordered extents).
388 * Therefore wait for any task currently allocating extents, since the
389 * block group's reservations counter is incremented while a read lock
390 * on the groups' semaphore is held and decremented after releasing
391 * the read access on that semaphore and creating the ordered extent.
393 down_write(&space_info->groups_sem);
394 up_write(&space_info->groups_sem);
396 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
399 struct btrfs_caching_control *btrfs_get_caching_control(
400 struct btrfs_block_group *cache)
402 struct btrfs_caching_control *ctl;
404 spin_lock(&cache->lock);
405 if (!cache->caching_ctl) {
406 spin_unlock(&cache->lock);
410 ctl = cache->caching_ctl;
411 refcount_inc(&ctl->count);
412 spin_unlock(&cache->lock);
416 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
418 if (refcount_dec_and_test(&ctl->count))
423 * When we wait for progress in the block group caching, it's because our
424 * allocation attempt failed at least once. So, we must sleep and let some
425 * progress happen before we try again.
427 * This function will sleep at least once waiting for new free space to show
428 * up, and then it will check the block group free space numbers for our min
429 * num_bytes. Another option is to have it go ahead and look in the rbtree for
430 * a free extent of a given size, but this is a good start.
432 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
433 * any of the information in this block group.
435 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
438 struct btrfs_caching_control *caching_ctl;
441 caching_ctl = btrfs_get_caching_control(cache);
446 * We've already failed to allocate from this block group, so even if
447 * there's enough space in the block group it isn't contiguous enough to
448 * allow for an allocation, so wait for at least the next wakeup tick,
449 * or for the caching to finish.
451 progress = atomic_read(&caching_ctl->progress);
453 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
454 (progress != atomic_read(&caching_ctl->progress) &&
455 (cache->free_space_ctl->free_space >= num_bytes)));
457 btrfs_put_caching_control(caching_ctl);
460 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
461 struct btrfs_caching_control *caching_ctl)
463 wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
464 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
467 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
469 struct btrfs_caching_control *caching_ctl;
472 caching_ctl = btrfs_get_caching_control(cache);
474 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
475 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
476 btrfs_put_caching_control(caching_ctl);
480 #ifdef CONFIG_BTRFS_DEBUG
481 static void fragment_free_space(struct btrfs_block_group *block_group)
483 struct btrfs_fs_info *fs_info = block_group->fs_info;
484 u64 start = block_group->start;
485 u64 len = block_group->length;
486 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
487 fs_info->nodesize : fs_info->sectorsize;
488 u64 step = chunk << 1;
490 while (len > chunk) {
491 btrfs_remove_free_space(block_group, start, chunk);
502 * This is only called by btrfs_cache_block_group(). Since we could have freed
503 * extents, we need to check the pinned_extents for any extents that can't be
504 * used yet, since their free space will be released as soon as the transaction commits.
507 int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
508 u64 *total_added_ret)
510 struct btrfs_fs_info *info = block_group->fs_info;
511 u64 extent_start, extent_end, size;
515 *total_added_ret = 0;
517 while (start < end) {
518 ret = find_first_extent_bit(&info->excluded_extents, start,
519 &extent_start, &extent_end,
520 EXTENT_DIRTY | EXTENT_UPTODATE,
525 if (extent_start <= start) {
526 start = extent_end + 1;
527 } else if (extent_start > start && extent_start < end) {
528 size = extent_start - start;
529 ret = btrfs_add_free_space_async_trimmed(block_group,
534 *total_added_ret += size;
535 start = extent_end + 1;
543 ret = btrfs_add_free_space_async_trimmed(block_group, start,
548 *total_added_ret += size;
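/*
 * Worked example (illustrative numbers): caching a block group spanning
 * [1M, 2M) with one excluded range [1.2M, 1.3M - 1] (the end is inclusive
 * in the io tree) results in two calls to
 * btrfs_add_free_space_async_trimmed(): one for [1M, 1.2M) inside the
 * loop and one for [1.3M, 2M) after it, with *total_added_ret summing
 * both sizes.
 */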
554 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
556 struct btrfs_block_group *block_group = caching_ctl->block_group;
557 struct btrfs_fs_info *fs_info = block_group->fs_info;
558 struct btrfs_root *extent_root;
559 struct btrfs_path *path;
560 struct extent_buffer *leaf;
561 struct btrfs_key key;
568 path = btrfs_alloc_path();
572 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
573 extent_root = btrfs_extent_root(fs_info, last);
575 #ifdef CONFIG_BTRFS_DEBUG
577 * If we're fragmenting we don't want to make anybody think we can
578 * allocate from this block group until we've had a chance to fragment the free space.
581 if (btrfs_should_fragment_free_space(block_group))
585 * We don't want to deadlock with somebody trying to allocate a new
586 * extent for the extent root while also trying to search the extent
587 * root to add free space. So we skip locking and search the commit
588 * root, since it's read-only.
590 path->skip_locking = 1;
591 path->search_commit_root = 1;
592 path->reada = READA_FORWARD;
596 key.type = BTRFS_EXTENT_ITEM_KEY;
599 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
603 leaf = path->nodes[0];
604 nritems = btrfs_header_nritems(leaf);
607 if (btrfs_fs_closing(fs_info) > 1) {
612 if (path->slots[0] < nritems) {
613 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
615 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
619 if (need_resched() ||
620 rwsem_is_contended(&fs_info->commit_root_sem)) {
621 btrfs_release_path(path);
622 up_read(&fs_info->commit_root_sem);
623 mutex_unlock(&caching_ctl->mutex);
625 mutex_lock(&caching_ctl->mutex);
626 down_read(&fs_info->commit_root_sem);
630 ret = btrfs_next_leaf(extent_root, path);
635 leaf = path->nodes[0];
636 nritems = btrfs_header_nritems(leaf);
640 if (key.objectid < last) {
643 key.type = BTRFS_EXTENT_ITEM_KEY;
644 btrfs_release_path(path);
648 if (key.objectid < block_group->start) {
653 if (key.objectid >= block_group->start + block_group->length)
656 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
657 key.type == BTRFS_METADATA_ITEM_KEY) {
660 ret = add_new_free_space(block_group, last, key.objectid,
664 total_found += space_added;
665 if (key.type == BTRFS_METADATA_ITEM_KEY)
666 last = key.objectid +
669 last = key.objectid + key.offset;
671 if (total_found > CACHING_CTL_WAKE_UP) {
674 atomic_inc(&caching_ctl->progress);
675 wake_up(&caching_ctl->wait);
682 ret = add_new_free_space(block_group, last,
683 block_group->start + block_group->length,
686 btrfs_free_path(path);
690 static noinline void caching_thread(struct btrfs_work *work)
692 struct btrfs_block_group *block_group;
693 struct btrfs_fs_info *fs_info;
694 struct btrfs_caching_control *caching_ctl;
697 caching_ctl = container_of(work, struct btrfs_caching_control, work);
698 block_group = caching_ctl->block_group;
699 fs_info = block_group->fs_info;
701 mutex_lock(&caching_ctl->mutex);
702 down_read(&fs_info->commit_root_sem);
704 if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
705 ret = load_free_space_cache(block_group);
712 * We failed to load the space cache, set ourselves to
713 * CACHE_STARTED and carry on.
715 spin_lock(&block_group->lock);
716 block_group->cached = BTRFS_CACHE_STARTED;
717 spin_unlock(&block_group->lock);
718 wake_up(&caching_ctl->wait);
722 * If we are in the transaction that populated the free space tree we
723 * can't actually cache from the free space tree as our commit root and
724 * real root are the same, so we could change the contents of the blocks
725 * while caching. Instead do the slow caching in this case, and after
726 * the transaction has committed we will be safe.
728 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
729 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
730 ret = load_free_space_tree(caching_ctl);
732 ret = load_extent_tree_free(caching_ctl);
734 spin_lock(&block_group->lock);
735 block_group->caching_ctl = NULL;
736 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
737 spin_unlock(&block_group->lock);
739 #ifdef CONFIG_BTRFS_DEBUG
740 if (btrfs_should_fragment_free_space(block_group)) {
743 spin_lock(&block_group->space_info->lock);
744 spin_lock(&block_group->lock);
745 bytes_used = block_group->length - block_group->used;
746 block_group->space_info->bytes_used += bytes_used >> 1;
747 spin_unlock(&block_group->lock);
748 spin_unlock(&block_group->space_info->lock);
749 fragment_free_space(block_group);
753 up_read(&fs_info->commit_root_sem);
754 btrfs_free_excluded_extents(block_group);
755 mutex_unlock(&caching_ctl->mutex);
757 wake_up(&caching_ctl->wait);
759 btrfs_put_caching_control(caching_ctl);
760 btrfs_put_block_group(block_group);
763 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
765 struct btrfs_fs_info *fs_info = cache->fs_info;
766 struct btrfs_caching_control *caching_ctl = NULL;
769 /* Allocator for zoned filesystems does not use the cache at all */
770 if (btrfs_is_zoned(fs_info))
773 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
777 INIT_LIST_HEAD(&caching_ctl->list);
778 mutex_init(&caching_ctl->mutex);
779 init_waitqueue_head(&caching_ctl->wait);
780 caching_ctl->block_group = cache;
781 refcount_set(&caching_ctl->count, 2);
782 atomic_set(&caching_ctl->progress, 0);
783 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
785 spin_lock(&cache->lock);
786 if (cache->cached != BTRFS_CACHE_NO) {
789 caching_ctl = cache->caching_ctl;
791 refcount_inc(&caching_ctl->count);
792 spin_unlock(&cache->lock);
795 WARN_ON(cache->caching_ctl);
796 cache->caching_ctl = caching_ctl;
797 cache->cached = BTRFS_CACHE_STARTED;
798 spin_unlock(&cache->lock);
800 write_lock(&fs_info->block_group_cache_lock);
801 refcount_inc(&caching_ctl->count);
802 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
803 write_unlock(&fs_info->block_group_cache_lock);
805 btrfs_get_block_group(cache);
807 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
809 if (wait && caching_ctl)
810 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
812 btrfs_put_caching_control(caching_ctl);
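/*
 * Usage sketch (hypothetical allocator caller): start caching and block
 * until it finishes, which is what wait == true requests:
 *
 *	ret = btrfs_cache_block_group(cache, true);
 *	if (ret)
 *		return ret;
 *
 * With wait == false the work is only queued on fs_info->caching_workers
 * and the caller may poll btrfs_block_group_done() later.
 */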
817 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
819 u64 extra_flags = chunk_to_extended(flags) &
820 BTRFS_EXTENDED_PROFILE_MASK;
822 write_seqlock(&fs_info->profiles_lock);
823 if (flags & BTRFS_BLOCK_GROUP_DATA)
824 fs_info->avail_data_alloc_bits &= ~extra_flags;
825 if (flags & BTRFS_BLOCK_GROUP_METADATA)
826 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
827 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
828 fs_info->avail_system_alloc_bits &= ~extra_flags;
829 write_sequnlock(&fs_info->profiles_lock);
833 * Clear incompat bits for the following feature(s):
835 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
836 * in the whole filesystem
838 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
840 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
842 bool found_raid56 = false;
843 bool found_raid1c34 = false;
845 if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
846 (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
847 (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
848 struct list_head *head = &fs_info->space_info;
849 struct btrfs_space_info *sinfo;
851 list_for_each_entry_rcu(sinfo, head, list) {
852 down_read(&sinfo->groups_sem);
853 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
855 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
857 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
858 found_raid1c34 = true;
859 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
860 found_raid1c34 = true;
861 up_read(&sinfo->groups_sem);
864 btrfs_clear_fs_incompat(fs_info, RAID56);
866 btrfs_clear_fs_incompat(fs_info, RAID1C34);
870 static int remove_block_group_item(struct btrfs_trans_handle *trans,
871 struct btrfs_path *path,
872 struct btrfs_block_group *block_group)
874 struct btrfs_fs_info *fs_info = trans->fs_info;
875 struct btrfs_root *root;
876 struct btrfs_key key;
879 root = btrfs_block_group_root(fs_info);
880 key.objectid = block_group->start;
881 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
882 key.offset = block_group->length;
884 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
890 ret = btrfs_del_item(trans, root, path);
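/*
 * Key layout example (illustrative values): a block group item is keyed as
 * (start, BTRFS_BLOCK_GROUP_ITEM_KEY, length), so removing a 1GiB block
 * group starting at logical 2GiB searches for the key
 * (2147483648, BTRFS_BLOCK_GROUP_ITEM_KEY, 1073741824) and deletes that
 * slot with btrfs_del_item().
 */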
894 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
895 u64 group_start, struct extent_map *em)
897 struct btrfs_fs_info *fs_info = trans->fs_info;
898 struct btrfs_path *path;
899 struct btrfs_block_group *block_group;
900 struct btrfs_free_cluster *cluster;
902 struct kobject *kobj = NULL;
906 struct btrfs_caching_control *caching_ctl = NULL;
908 bool remove_rsv = false;
910 block_group = btrfs_lookup_block_group(fs_info, group_start);
911 BUG_ON(!block_group);
912 BUG_ON(!block_group->ro);
914 trace_btrfs_remove_block_group(block_group);
916 * Free the reserved super bytes from this block group before releasing it.
919 btrfs_free_excluded_extents(block_group);
920 btrfs_free_ref_tree_range(fs_info, block_group->start,
921 block_group->length);
923 index = btrfs_bg_flags_to_raid_index(block_group->flags);
924 factor = btrfs_bg_type_to_factor(block_group->flags);
926 /* make sure this block group isn't part of an allocation cluster */
927 cluster = &fs_info->data_alloc_cluster;
928 spin_lock(&cluster->refill_lock);
929 btrfs_return_cluster_to_free_space(block_group, cluster);
930 spin_unlock(&cluster->refill_lock);
933 * make sure this block group isn't part of a metadata allocation cluster
936 cluster = &fs_info->meta_alloc_cluster;
937 spin_lock(&cluster->refill_lock);
938 btrfs_return_cluster_to_free_space(block_group, cluster);
939 spin_unlock(&cluster->refill_lock);
941 btrfs_clear_treelog_bg(block_group);
942 btrfs_clear_data_reloc_bg(block_group);
944 path = btrfs_alloc_path();
951 * get the inode first so any iput calls done for the io_list
952 * aren't the final iput (no unlinks allowed now)
954 inode = lookup_free_space_inode(block_group, path);
956 mutex_lock(&trans->transaction->cache_write_mutex);
958 * Make sure our free space cache IO is done before removing the free space inode.
961 spin_lock(&trans->transaction->dirty_bgs_lock);
962 if (!list_empty(&block_group->io_list)) {
963 list_del_init(&block_group->io_list);
965 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
967 spin_unlock(&trans->transaction->dirty_bgs_lock);
968 btrfs_wait_cache_io(trans, block_group, path);
969 btrfs_put_block_group(block_group);
970 spin_lock(&trans->transaction->dirty_bgs_lock);
973 if (!list_empty(&block_group->dirty_list)) {
974 list_del_init(&block_group->dirty_list);
976 btrfs_put_block_group(block_group);
978 spin_unlock(&trans->transaction->dirty_bgs_lock);
979 mutex_unlock(&trans->transaction->cache_write_mutex);
981 ret = btrfs_remove_free_space_inode(trans, inode, block_group);
985 write_lock(&fs_info->block_group_cache_lock);
986 rb_erase_cached(&block_group->cache_node,
987 &fs_info->block_group_cache_tree);
988 RB_CLEAR_NODE(&block_group->cache_node);
990 /* Once for the block groups rbtree */
991 btrfs_put_block_group(block_group);
993 write_unlock(&fs_info->block_group_cache_lock);
995 down_write(&block_group->space_info->groups_sem);
997 * we must use list_del_init so people can check to see if they
998 * are still on the list after taking the semaphore
1000 list_del_init(&block_group->list);
1001 if (list_empty(&block_group->space_info->block_groups[index])) {
1002 kobj = block_group->space_info->block_group_kobjs[index];
1003 block_group->space_info->block_group_kobjs[index] = NULL;
1004 clear_avail_alloc_bits(fs_info, block_group->flags);
1006 up_write(&block_group->space_info->groups_sem);
1007 clear_incompat_bg_bits(fs_info, block_group->flags);
1013 if (block_group->cached == BTRFS_CACHE_STARTED)
1014 btrfs_wait_block_group_cache_done(block_group);
1016 write_lock(&fs_info->block_group_cache_lock);
1017 caching_ctl = btrfs_get_caching_control(block_group);
1019 struct btrfs_caching_control *ctl;
1021 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
1022 if (ctl->block_group == block_group) {
1024 refcount_inc(&caching_ctl->count);
1030 list_del_init(&caching_ctl->list);
1031 write_unlock(&fs_info->block_group_cache_lock);
1034 /* Once for the caching bgs list and once for us. */
1035 btrfs_put_caching_control(caching_ctl);
1036 btrfs_put_caching_control(caching_ctl);
1039 spin_lock(&trans->transaction->dirty_bgs_lock);
1040 WARN_ON(!list_empty(&block_group->dirty_list));
1041 WARN_ON(!list_empty(&block_group->io_list));
1042 spin_unlock(&trans->transaction->dirty_bgs_lock);
1044 btrfs_remove_free_space_cache(block_group);
1046 spin_lock(&block_group->space_info->lock);
1047 list_del_init(&block_group->ro_list);
1049 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1050 WARN_ON(block_group->space_info->total_bytes
1051 < block_group->length);
1052 WARN_ON(block_group->space_info->bytes_readonly
1053 < block_group->length - block_group->zone_unusable);
1054 WARN_ON(block_group->space_info->bytes_zone_unusable
1055 < block_group->zone_unusable);
1056 WARN_ON(block_group->space_info->disk_total
1057 < block_group->length * factor);
1058 WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
1059 &block_group->runtime_flags) &&
1060 block_group->space_info->active_total_bytes
1061 < block_group->length);
1063 block_group->space_info->total_bytes -= block_group->length;
1064 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1065 block_group->space_info->active_total_bytes -= block_group->length;
1066 block_group->space_info->bytes_readonly -=
1067 (block_group->length - block_group->zone_unusable);
1068 block_group->space_info->bytes_zone_unusable -=
1069 block_group->zone_unusable;
1070 block_group->space_info->disk_total -= block_group->length * factor;
1072 spin_unlock(&block_group->space_info->lock);
1075 * Remove the free space for the block group from the free space tree
1076 * and the block group's item from the extent tree before marking the
1077 * block group as removed. This is to prevent races with tasks that
1078 * freeze and unfreeze a block group, this task and another task
1079 * allocating a new block group - the unfreeze task ends up removing
1080 * the block group's extent map before the task calling this function
1081 * deletes the block group item from the extent tree, allowing for
1082 * another task to attempt to create another block group with the same
1083 * item key (and failing with -EEXIST and a transaction abort).
1085 ret = remove_block_group_free_space(trans, block_group);
1089 ret = remove_block_group_item(trans, path, block_group);
1093 spin_lock(&block_group->lock);
1094 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
1097 * At this point trimming or scrub can't start on this block group,
1098 * because we removed the block group from the rbtree
1099 * fs_info->block_group_cache_tree so no one can find it anymore and
1100 * even if someone already got this block group before we removed it
1101 * from the rbtree, they have already incremented block_group->frozen -
1102 * if they didn't, for the trimming case they won't find any free space
1103 * entries because we already removed them all when we called
1104 * btrfs_remove_free_space_cache().
1106 * And we must not remove the extent map from the fs_info->mapping_tree
1107 * yet, so that the same logical address range and physical device space
1108 * ranges cannot be reused for a new block group. This is needed to
1109 * avoid races with trimming and scrub.
1111 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1112 * completely transactionless, so while it is trimming a range the
1113 * currently running transaction might finish and a new one start,
1114 * allowing for new block groups to be created that can reuse the same
1115 * physical device locations unless we take this special care.
1117 * There may also be an implicit trim operation if the file system
1118 * is mounted with -odiscard. The same protections must remain
1119 * in place until the extents have been discarded completely when
1120 * the transaction commit has completed.
1122 remove_em = (atomic_read(&block_group->frozen) == 0);
1123 spin_unlock(&block_group->lock);
1126 struct extent_map_tree *em_tree;
1128 em_tree = &fs_info->mapping_tree;
1129 write_lock(&em_tree->lock);
1130 remove_extent_mapping(em_tree, em);
1131 write_unlock(&em_tree->lock);
1132 /* once for the tree */
1133 free_extent_map(em);
1137 /* Once for the lookup reference */
1138 btrfs_put_block_group(block_group);
1140 btrfs_delayed_refs_rsv_release(fs_info, 1);
1141 btrfs_free_path(path);
1145 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1146 struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1148 struct btrfs_root *root = btrfs_block_group_root(fs_info);
1149 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1150 struct extent_map *em;
1151 struct map_lookup *map;
1152 unsigned int num_items;
1154 read_lock(&em_tree->lock);
1155 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1156 read_unlock(&em_tree->lock);
1157 ASSERT(em && em->start == chunk_offset);
1160 * We need to reserve 3 + N units from the metadata space info in order
1161 * to remove a block group (done at btrfs_remove_chunk() and at
1162 * btrfs_remove_block_group()), which are used for:
1164 * 1 unit for adding the free space inode's orphan (located in the tree of tree roots).
1166 * 1 unit for deleting the block group item (located in the extent tree).
1168 * 1 unit for deleting the free space item (located in tree of tree roots).
1170 * N units for deleting N device extent items corresponding to each
1171 * stripe (located in the device tree).
1173 * In order to remove a block group we also need to reserve units in the
1174 * system space info in order to update the chunk tree (update one or
1175 * more device items and remove one chunk item), but this is done at
1176 * btrfs_remove_chunk() through a call to check_system_chunk().
1178 map = em->map_lookup;
1179 num_items = 3 + map->num_stripes;
1180 free_extent_map(em);
1182 return btrfs_start_transaction_fallback_global_rsv(root, num_items);
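/*
 * Worked example (illustrative): for a RAID1 chunk with two stripes,
 * map->num_stripes == 2 and thus num_items == 3 + 2 == 5: the orphan
 * item, the block group item, the free space item and two device extent
 * items.
 */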
1186 * Mark block group @cache read-only, so later writes won't happen to block group @cache.
1189 * If @force is not set, this function will only mark the block group readonly
1190 * if we have enough free space (1M) in other metadata/system block groups.
1191 * If @force is set, this function will mark the block group readonly
1192 * without checking free space.
1194 * NOTE: This function doesn't care if other block groups can contain all the
1195 * data in this block group. That check should be done by the relocation routine,
1196 * not this function.
1198 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1200 struct btrfs_space_info *sinfo = cache->space_info;
1204 spin_lock(&sinfo->lock);
1205 spin_lock(&cache->lock);
1207 if (cache->swap_extents) {
1218 num_bytes = cache->length - cache->reserved - cache->pinned -
1219 cache->bytes_super - cache->zone_unusable - cache->used;
1222 * Data never overcommits, even in mixed mode, so just do a straight
1223 * check of how much space is left over from what we have allocated.
1227 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1228 u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1231 * Here we make sure that if we mark this bg RO, we still have enough
1232 * free space left as a buffer.
1234 if (sinfo_used + num_bytes <= sinfo->total_bytes)
1238 * We overcommit metadata, so we need to do the
1239 * btrfs_can_overcommit check here, and we need to pass in
1240 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1241 * leeway to allow us to mark this block group as read only.
1243 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1244 BTRFS_RESERVE_NO_FLUSH))
1249 sinfo->bytes_readonly += num_bytes;
1250 if (btrfs_is_zoned(cache->fs_info)) {
1251 /* Migrate zone_unusable bytes to readonly */
1252 sinfo->bytes_readonly += cache->zone_unusable;
1253 sinfo->bytes_zone_unusable -= cache->zone_unusable;
1254 cache->zone_unusable = 0;
1257 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1260 spin_unlock(&cache->lock);
1261 spin_unlock(&sinfo->lock);
1262 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1263 btrfs_info(cache->fs_info,
1264 "unable to make block group %llu ro", cache->start);
1265 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
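/*
 * Worked example for the num_bytes calculation above (illustrative,
 * non-zoned numbers, so zone_unusable == 0): a 1024M block group with
 * 600M used, 100M reserved, 50M pinned and 16M of super stripes leaves
 * num_bytes == 1024M - 100M - 50M - 16M - 600M == 258M of still
 * allocatable space, and that is the amount which must fit in the
 * space_info (or be covered by overcommit) before the group can go
 * read-only without @force.
 */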
1270 static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
1271 struct btrfs_block_group *bg)
1273 struct btrfs_fs_info *fs_info = bg->fs_info;
1274 struct btrfs_transaction *prev_trans = NULL;
1275 const u64 start = bg->start;
1276 const u64 end = start + bg->length - 1;
1279 spin_lock(&fs_info->trans_lock);
1280 if (trans->transaction->list.prev != &fs_info->trans_list) {
1281 prev_trans = list_last_entry(&trans->transaction->list,
1282 struct btrfs_transaction, list);
1283 refcount_inc(&prev_trans->use_count);
1285 spin_unlock(&fs_info->trans_lock);
1288 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1289 * btrfs_finish_extent_commit(). If we are at transaction N, another
1290 * task might be running finish_extent_commit() for the previous
1291 * transaction N - 1, and have seen a range belonging to the block
1292 * group in pinned_extents before we were able to clear the whole block
1293 * group range from pinned_extents. This means that task can look up
1294 * the block group after we unpinned it from pinned_extents and removed
1295 * it, leading to a BUG_ON() at unpin_extent_range().
1297 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1299 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
1305 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
1308 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1310 btrfs_put_transaction(prev_trans);
1316 * Process the unused_bgs list and remove any that don't have any allocated
1317 * space inside of them.
1319 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1321 LIST_HEAD(retry_list);
1322 struct btrfs_block_group *block_group;
1323 struct btrfs_space_info *space_info;
1324 struct btrfs_trans_handle *trans;
1325 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
1328 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1331 if (btrfs_fs_closing(fs_info))
1335 * Long running balances can keep us blocked here for eternity, so
1336 * simply skip deletion if we're unable to get the mutex.
1338 if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1341 spin_lock(&fs_info->unused_bgs_lock);
1342 while (!list_empty(&fs_info->unused_bgs)) {
1346 block_group = list_first_entry(&fs_info->unused_bgs,
1347 struct btrfs_block_group,
1349 list_del_init(&block_group->bg_list);
1351 space_info = block_group->space_info;
1353 if (ret || btrfs_mixed_space_info(space_info)) {
1354 btrfs_put_block_group(block_group);
1357 spin_unlock(&fs_info->unused_bgs_lock);
1359 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1361 /* Don't want to race with allocators so take the groups_sem */
1362 down_write(&space_info->groups_sem);
1365 * Async discard moves the final block group discard to be prior
1366 * to the unused_bgs code path. Therefore, if it's not fully
1367 * trimmed, punt it back to the async discard lists.
1369 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
1370 !btrfs_is_free_space_trimmed(block_group)) {
1371 trace_btrfs_skip_unused_block_group(block_group);
1372 up_write(&space_info->groups_sem);
1373 /* Requeue if we failed because of async discard */
1374 btrfs_discard_queue_work(&fs_info->discard_ctl,
1379 spin_lock(&space_info->lock);
1380 spin_lock(&block_group->lock);
1381 if (btrfs_is_block_group_used(block_group) || block_group->ro ||
1382 list_is_singular(&block_group->list)) {
1384 * We want to bail if we made new allocations or have
1385 * outstanding allocations in this block group. We do
1386 * the ro check in case balance is currently acting on this block group.
1389 trace_btrfs_skip_unused_block_group(block_group);
1390 spin_unlock(&block_group->lock);
1391 spin_unlock(&space_info->lock);
1392 up_write(&space_info->groups_sem);
1397 * The block group may be unused but there may be space reserved
1398 * accounting with the existence of that block group, that is,
1399 * space_info->bytes_may_use was incremented by a task but no
1400 * space was yet allocated from the block group by the task.
1401 * That space may or may not be allocated, as we are generally
1402 * pessimistic about space reservation for metadata as well as
1403 * for data when using compression (as we reserve space based on
1404 * the worst case, when data can't be compressed, and before
1405 * actually attempting compression, before starting writeback).
1407 * So check if the total space of the space_info minus the size
1408 * of this block group is less than the used space of the
1409 * space_info - if that's the case, then it means we have tasks
1410 * that might be relying on the block group in order to allocate
1411 * extents, and add back the block group to the unused list when
1412 * we finish, so that we retry later in case no tasks ended up
1413 * needing to allocate extents from the block group.
1415 used = btrfs_space_info_used(space_info, true);
1416 if (space_info->total_bytes - block_group->length < used &&
1417 block_group->zone_unusable < block_group->length) {
1419 * Add a reference for the list, compensate for the ref
1420 * drop under the "next" label for the
1421 * fs_info->unused_bgs list.
1423 btrfs_get_block_group(block_group);
1424 list_add_tail(&block_group->bg_list, &retry_list);
1426 trace_btrfs_skip_unused_block_group(block_group);
1427 spin_unlock(&block_group->lock);
1428 spin_unlock(&space_info->lock);
1429 up_write(&space_info->groups_sem);
1433 spin_unlock(&block_group->lock);
1434 spin_unlock(&space_info->lock);
1436 /* We don't want to force the issue, only flip if it's ok. */
1437 ret = inc_block_group_ro(block_group, 0);
1438 up_write(&space_info->groups_sem);
1444 ret = btrfs_zone_finish(block_group);
1446 btrfs_dec_block_group_ro(block_group);
1453 * Want to do this before we do anything else so we can recover
1454 * properly if we fail to join the transaction.
1456 trans = btrfs_start_trans_remove_block_group(fs_info,
1457 block_group->start);
1458 if (IS_ERR(trans)) {
1459 btrfs_dec_block_group_ro(block_group);
1460 ret = PTR_ERR(trans);
1465 * We could have pending pinned extents for this block group,
1466 * just delete them, we don't care about them anymore.
1468 if (!clean_pinned_extents(trans, block_group)) {
1469 btrfs_dec_block_group_ro(block_group);
1474 * At this point, the block_group is read only and should fail
1475 * new allocations. However, btrfs_finish_extent_commit() can
1476 * cause this block_group to be placed back on the discard
1477 * lists because now the block_group isn't fully discarded.
1478 * Bail here and try again later after discarding everything.
1480 spin_lock(&fs_info->discard_ctl.lock);
1481 if (!list_empty(&block_group->discard_list)) {
1482 spin_unlock(&fs_info->discard_ctl.lock);
1483 btrfs_dec_block_group_ro(block_group);
1484 btrfs_discard_queue_work(&fs_info->discard_ctl,
1488 spin_unlock(&fs_info->discard_ctl.lock);
1490 /* Reset pinned so btrfs_put_block_group doesn't complain */
1491 spin_lock(&space_info->lock);
1492 spin_lock(&block_group->lock);
1494 btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1495 -block_group->pinned);
1496 space_info->bytes_readonly += block_group->pinned;
1497 block_group->pinned = 0;
1499 spin_unlock(&block_group->lock);
1500 spin_unlock(&space_info->lock);
1503 * The normal path here is that an unused block group is passed in,
1504 * and trimming is then handled in the transaction commit path.
1505 * Async discard interposes before this to do the trimming
1506 * before coming down the unused block group path as trimming
1507 * will no longer be done later in the transaction commit path.
1509 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
1513 * DISCARD can flip during remount. On zoned filesystems, we
1514 * need to reset sequential-required zones.
1516 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
1517 btrfs_is_zoned(fs_info);
1519 /* Implicit trim during transaction commit. */
1521 btrfs_freeze_block_group(block_group);
1524 * btrfs_remove_chunk() will abort the transaction if things go horribly wrong.
1527 ret = btrfs_remove_chunk(trans, block_group->start);
1531 btrfs_unfreeze_block_group(block_group);
1536 * If we're not mounted with -odiscard, we can just forget
1537 * about this block group. Otherwise we'll need to wait
1538 * until transaction commit to do the actual discard.
1541 spin_lock(&fs_info->unused_bgs_lock);
1543 * A concurrent scrub might have added us to the list
1544 * fs_info->unused_bgs, so use a list_move operation
1545 * to add the block group to the deleted_bgs list.
1547 list_move(&block_group->bg_list,
1548 &trans->transaction->deleted_bgs);
1549 spin_unlock(&fs_info->unused_bgs_lock);
1550 btrfs_get_block_group(block_group);
1553 btrfs_end_transaction(trans);
1555 btrfs_put_block_group(block_group);
1556 spin_lock(&fs_info->unused_bgs_lock);
1558 list_splice_tail(&retry_list, &fs_info->unused_bgs);
1559 spin_unlock(&fs_info->unused_bgs_lock);
1560 mutex_unlock(&fs_info->reclaim_bgs_lock);
1564 btrfs_end_transaction(trans);
1565 spin_lock(&fs_info->unused_bgs_lock);
1566 list_splice_tail(&retry_list, &fs_info->unused_bgs);
1567 spin_unlock(&fs_info->unused_bgs_lock);
1568 mutex_unlock(&fs_info->reclaim_bgs_lock);
1569 btrfs_put_block_group(block_group);
1570 btrfs_discard_punt_unused_bgs_list(fs_info);
1573 void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1575 struct btrfs_fs_info *fs_info = bg->fs_info;
1577 spin_lock(&fs_info->unused_bgs_lock);
1578 if (list_empty(&bg->bg_list)) {
1579 btrfs_get_block_group(bg);
1580 trace_btrfs_add_unused_block_group(bg);
1581 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1582 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
1583 /* Pull out the block group from the reclaim_bgs list. */
1584 trace_btrfs_add_unused_block_group(bg);
1585 list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
1587 spin_unlock(&fs_info->unused_bgs_lock);
1591 * We want block groups with a low number of used bytes to be in the beginning
1592 * of the list, so they will get reclaimed first.
1594 static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
1595 const struct list_head *b)
1597 const struct btrfs_block_group *bg1, *bg2;
1599 bg1 = list_entry(a, struct btrfs_block_group, bg_list);
1600 bg2 = list_entry(b, struct btrfs_block_group, bg_list);
1602 return bg1->used > bg2->used;
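/*
 * Sort example (illustrative): list_sort() treats a positive return value
 * as "a sorts after b", so used counts of { 500M, 100M, 300M } end up
 * ordered as { 100M, 300M, 500M } and the emptiest block groups are
 * reclaimed first.
 */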
1605 static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
1607 if (btrfs_is_zoned(fs_info))
1608 return btrfs_zoned_should_reclaim(fs_info);
1612 void btrfs_reclaim_bgs_work(struct work_struct *work)
1614 struct btrfs_fs_info *fs_info =
1615 container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
1616 struct btrfs_block_group *bg;
1617 struct btrfs_space_info *space_info;
1619 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1622 if (btrfs_fs_closing(fs_info))
1625 if (!btrfs_should_reclaim(fs_info))
1628 sb_start_write(fs_info->sb);
1630 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
1631 sb_end_write(fs_info->sb);
1636 * Long running balances can keep us blocked here for eternity, so
1637 * simply skip reclaim if we're unable to get the mutex.
1639 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
1640 btrfs_exclop_finish(fs_info);
1641 sb_end_write(fs_info->sb);
1645 spin_lock(&fs_info->unused_bgs_lock);
1647 * Sort happens under lock because we can't simply splice it and sort.
1648 * The block groups might still be in use and reachable via bg_list,
1649 * and their presence in the reclaim_bgs list must be preserved.
1651 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
1652 while (!list_empty(&fs_info->reclaim_bgs)) {
1656 bg = list_first_entry(&fs_info->reclaim_bgs,
1657 struct btrfs_block_group,
1659 list_del_init(&bg->bg_list);
1661 space_info = bg->space_info;
1662 spin_unlock(&fs_info->unused_bgs_lock);
1664 /* Don't race with allocators so take the groups_sem */
1665 down_write(&space_info->groups_sem);
1667 spin_lock(&bg->lock);
1668 if (bg->reserved || bg->pinned || bg->ro) {
1670 * We want to bail if we made new allocations or have
1671 * outstanding allocations in this block group. We do
1672 * the ro check in case balance is currently acting on this block group.
1675 spin_unlock(&bg->lock);
1676 up_write(&space_info->groups_sem);
1679 spin_unlock(&bg->lock);
1682 * Get out fast, in case we're read-only or unmounting the
1683 * filesystem. It is OK to drop block groups from the list even
1684 * for the read-only case. As we did sb_start_write(),
1685 * "mount -o remount,ro" won't happen and read-only filesystem
1686 * means it is forced read-only due to a fatal error. So, it
1687 * never gets back to read-write to let us reclaim again.
1689 if (btrfs_need_cleaner_sleep(fs_info)) {
1690 up_write(&space_info->groups_sem);
1695 * Cache the zone_unusable value before turning the block group
1696 * to read only. As soon as the block group is read only, its
1697 * zone_unusable value gets moved to the block group's read-only
1698 * bytes and isn't available for calculations anymore.
1700 zone_unusable = bg->zone_unusable;
1701 ret = inc_block_group_ro(bg, 0);
1702 up_write(&space_info->groups_sem);
1707 "reclaiming chunk %llu with %llu%% used %llu%% unusable",
1709 div64_u64(bg->used * 100, bg->length),
1710 div64_u64(zone_unusable * 100, bg->length));
1711 trace_btrfs_reclaim_block_group(bg);
1712 ret = btrfs_relocate_chunk(fs_info, bg->start);
1714 btrfs_dec_block_group_ro(bg);
1715 btrfs_err(fs_info, "error relocating chunk %llu",
1721 btrfs_mark_bg_to_reclaim(bg);
1722 btrfs_put_block_group(bg);
1724 mutex_unlock(&fs_info->reclaim_bgs_lock);
1726 * Reclaiming all the block groups in the list can take really
1727 * long. Prioritize cleaning up unused block groups.
1729 btrfs_delete_unused_bgs(fs_info);
1731 * If we are interrupted by a balance, we can just bail out. The
1732 * cleaner thread will restart it again if necessary.
1734 if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1736 spin_lock(&fs_info->unused_bgs_lock);
1738 spin_unlock(&fs_info->unused_bgs_lock);
1739 mutex_unlock(&fs_info->reclaim_bgs_lock);
1741 btrfs_exclop_finish(fs_info);
1742 sb_end_write(fs_info->sb);
1745 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
1747 spin_lock(&fs_info->unused_bgs_lock);
1748 if (!list_empty(&fs_info->reclaim_bgs))
1749 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
1750 spin_unlock(&fs_info->unused_bgs_lock);
1753 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
1755 struct btrfs_fs_info *fs_info = bg->fs_info;
1757 spin_lock(&fs_info->unused_bgs_lock);
1758 if (list_empty(&bg->bg_list)) {
1759 btrfs_get_block_group(bg);
1760 trace_btrfs_add_reclaim_block_group(bg);
1761 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
1763 spin_unlock(&fs_info->unused_bgs_lock);
1766 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
1767 struct btrfs_path *path)
1769 struct extent_map_tree *em_tree;
1770 struct extent_map *em;
1771 struct btrfs_block_group_item bg;
1772 struct extent_buffer *leaf;
1777 slot = path->slots[0];
1778 leaf = path->nodes[0];
1780 em_tree = &fs_info->mapping_tree;
1781 read_lock(&em_tree->lock);
1782 em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
1783 read_unlock(&em_tree->lock);
1786 "logical %llu len %llu found bg but no related chunk",
1787 key->objectid, key->offset);
1791 if (em->start != key->objectid || em->len != key->offset) {
1793 "block group %llu len %llu mismatch with chunk %llu len %llu",
1794 key->objectid, key->offset, em->start, em->len);
1799 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
1801 flags = btrfs_stack_block_group_flags(&bg) &
1802 BTRFS_BLOCK_GROUP_TYPE_MASK;
1804 if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1806 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1807 key->objectid, key->offset, flags,
1808 (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
1813 free_extent_map(em);
1817 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1818 struct btrfs_path *path,
1819 struct btrfs_key *key)
1821 struct btrfs_root *root = btrfs_block_group_root(fs_info);
1823 struct btrfs_key found_key;
1825 btrfs_for_each_slot(root, key, &found_key, path, ret) {
1826 if (found_key.objectid >= key->objectid &&
1827 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1828 return read_bg_from_eb(fs_info, &found_key, path);
1834 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1836 u64 extra_flags = chunk_to_extended(flags) &
1837 BTRFS_EXTENDED_PROFILE_MASK;
1839 write_seqlock(&fs_info->profiles_lock);
1840 if (flags & BTRFS_BLOCK_GROUP_DATA)
1841 fs_info->avail_data_alloc_bits |= extra_flags;
1842 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1843 fs_info->avail_metadata_alloc_bits |= extra_flags;
1844 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1845 fs_info->avail_system_alloc_bits |= extra_flags;
1846 write_sequnlock(&fs_info->profiles_lock);
1850 * Map a physical disk address to a list of logical addresses
1852 * @fs_info: the filesystem
1853 * @chunk_start: logical address of block group
1854 * @bdev: physical device to resolve, can be NULL to indicate any device
1855 * @physical: physical address to map to logical addresses
1856 * @logical: return array of logical addresses which map to @physical
1857 * @naddrs: length of @logical
1858 * @stripe_len: size of IO stripe for the given block group
1860 * Maps a particular @physical disk address to a list of @logical addresses.
1861 * Used primarily to exclude those portions of a block group that contain super block copies.
1864 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
1865 struct block_device *bdev, u64 physical, u64 **logical,
1866 int *naddrs, int *stripe_len)
1868 struct extent_map *em;
1869 struct map_lookup *map;
1872 u64 data_stripe_length;
1877 em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
1881 map = em->map_lookup;
1882 data_stripe_length = em->orig_block_len;
1883 io_stripe_size = map->stripe_len;
1884 chunk_start = em->start;
1886 /* For RAID5/6 adjust to a full IO stripe length */
1887 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
1888 io_stripe_size = map->stripe_len * nr_data_stripes(map);
1890 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
1896 for (i = 0; i < map->num_stripes; i++) {
1897 bool already_inserted = false;
1902 if (!in_range(physical, map->stripes[i].physical,
1903 data_stripe_length))
1906 if (bdev && map->stripes[i].dev->bdev != bdev)
1909 stripe_nr = physical - map->stripes[i].physical;
1910 stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
1912 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
1913 BTRFS_BLOCK_GROUP_RAID10)) {
1914 stripe_nr = stripe_nr * map->num_stripes + i;
1915 stripe_nr = div_u64(stripe_nr, map->sub_stripes);
1918 * The remaining case would be for RAID56, multiply by
1919 * nr_data_stripes(). Alternatively, just use rmap_len below
1920 * instead of map->stripe_len
1923 bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
1925 /* Ensure we don't add duplicate addresses */
1926 for (j = 0; j < nr; j++) {
1927 if (buf[j] == bytenr) {
1928 already_inserted = true;
1933 if (!already_inserted)
1939 *stripe_len = io_stripe_size;
1941 free_extent_map(em);
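/*
 * Worked example (illustrative, RAID0 over two devices): with
 * chunk_start == 1G, map->stripe_len == 64K and num_stripes == 2, a
 * @physical that sits 64K into the device extent of stripe i == 1 gives
 * stripe_nr == 1 and offset == 0; the RAID0/RAID10 branch then computes
 * stripe_nr = 1 * 2 + 1 == 3 (sub_stripes == 1 for RAID0), so the
 * logical address is 1G + 3 * 64K. That matches the round-robin layout
 * where logical stripes 1 and 3 live on the second device.
 */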
1945 static int exclude_super_stripes(struct btrfs_block_group *cache)
1947 struct btrfs_fs_info *fs_info = cache->fs_info;
1948 const bool zoned = btrfs_is_zoned(fs_info);
1954 if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1955 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
1956 cache->bytes_super += stripe_len;
1957 ret = btrfs_add_excluded_extent(fs_info, cache->start,
1963 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1964 bytenr = btrfs_sb_offset(i);
1965 ret = btrfs_rmap_block(fs_info, cache->start, NULL,
1966 bytenr, &logical, &nr, &stripe_len);
1970 /* Shouldn't have super stripes in sequential zones */
1974 "zoned: block group %llu must not contain super block",
1980 u64 len = min_t(u64, stripe_len,
1981 cache->start + cache->length - logical[nr]);
1983 cache->bytes_super += len;
1984 ret = btrfs_add_excluded_extent(fs_info, logical[nr],
1997 static struct btrfs_block_group *btrfs_create_block_group_cache(
1998 struct btrfs_fs_info *fs_info, u64 start)
2000 struct btrfs_block_group *cache;
2002 cache = kzalloc(sizeof(*cache), GFP_NOFS);
2006 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
2008 if (!cache->free_space_ctl) {
2013 cache->start = start;
2015 cache->fs_info = fs_info;
2016 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
2018 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
2020 refcount_set(&cache->refs, 1);
2021 spin_lock_init(&cache->lock);
2022 init_rwsem(&cache->data_rwsem);
2023 INIT_LIST_HEAD(&cache->list);
2024 INIT_LIST_HEAD(&cache->cluster_list);
2025 INIT_LIST_HEAD(&cache->bg_list);
2026 INIT_LIST_HEAD(&cache->ro_list);
2027 INIT_LIST_HEAD(&cache->discard_list);
2028 INIT_LIST_HEAD(&cache->dirty_list);
2029 INIT_LIST_HEAD(&cache->io_list);
2030 INIT_LIST_HEAD(&cache->active_bg_list);
2031 btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
2032 atomic_set(&cache->frozen, 0);
2033 mutex_init(&cache->free_space_lock);
2034 cache->full_stripe_locks_root.root = RB_ROOT;
2035 mutex_init(&cache->full_stripe_locks_root.lock);
2041 * Iterate all chunks and verify that each of them has the corresponding block group.
2044 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
2046 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2047 struct extent_map *em;
2048 struct btrfs_block_group *bg;
2053 read_lock(&map_tree->lock);
2055 * lookup_extent_mapping will return the first extent map
2056 * intersecting the range, so setting @len to 1 is enough to
2057 * get the first chunk.
2059 em = lookup_extent_mapping(map_tree, start, 1);
2060 read_unlock(&map_tree->lock);
2064 bg = btrfs_lookup_block_group(fs_info, em->start);
2067 "chunk start=%llu len=%llu doesn't have corresponding block group",
2068 em->start, em->len);
2070 free_extent_map(em);
2073 if (bg->start != em->start || bg->length != em->len ||
2074 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2075 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2077 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
2079 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
2080 bg->start, bg->length,
2081 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2083 free_extent_map(em);
2084 btrfs_put_block_group(bg);
2087 start = em->start + em->len;
2088 free_extent_map(em);
2089 btrfs_put_block_group(bg);
2094 static int read_one_block_group(struct btrfs_fs_info *info,
2095 struct btrfs_block_group_item *bgi,
2096 const struct btrfs_key *key,
2099 struct btrfs_block_group *cache;
2100 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
2103 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
2105 cache = btrfs_create_block_group_cache(info, key->objectid);
2109 cache->length = key->offset;
2110 cache->used = btrfs_stack_block_group_used(bgi);
2111 cache->flags = btrfs_stack_block_group_flags(bgi);
2112 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
2114 set_free_space_tree_thresholds(cache);
2118 * When we mount with old space cache, we need to
2119 * set BTRFS_DC_CLEAR and set dirty flag.
2121 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
2122 * truncate the old free space cache inode and
2124 * b) Setting 'dirty flag' makes sure that we flush
2125 * the new space cache info onto disk.
2127 if (btrfs_test_opt(info, SPACE_CACHE))
2128 cache->disk_cache_state = BTRFS_DC_CLEAR;
2130 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2131 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2133 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
2139 ret = btrfs_load_block_group_zone_info(cache, false);
2141 btrfs_err(info, "zoned: failed to load zone info of bg %llu",
2147 * We need to exclude the super stripes now so that the space info has
2148 * super bytes accounted for, otherwise we'll think we have more space
2149 * than we actually do.
2151 ret = exclude_super_stripes(cache);
2153 /* We may have excluded something, so call this just in case. */
2154 btrfs_free_excluded_extents(cache);
2159 * For zoned filesystem, space after the allocation offset is the only
2160 * free space for a block group. So, we don't need any caching work.
2161 * btrfs_calc_zone_unusable() will set the amount of free space and
2162 * zone_unusable space.
2164 * For regular filesystem, check for two cases, either we are full, and
2165 * therefore don't need to bother with the caching work since we won't
2166 * find any space, or we are empty, and we can just add all the space
2167 * in and be done with it. This saves us _a_lot_ of time, particularly
2170 if (btrfs_is_zoned(info)) {
2171 btrfs_calc_zone_unusable(cache);
2172 /* Should not have any excluded extents. Just in case, though. */
2173 btrfs_free_excluded_extents(cache);
2174 } else if (cache->length == cache->used) {
2175 cache->cached = BTRFS_CACHE_FINISHED;
2176 btrfs_free_excluded_extents(cache);
2177 } else if (cache->used == 0) {
2178 cache->cached = BTRFS_CACHE_FINISHED;
2179 ret = add_new_free_space(cache, cache->start,
2180 cache->start + cache->length, NULL);
2181 btrfs_free_excluded_extents(cache);
2186 ret = btrfs_add_block_group_cache(info, cache);
2188 btrfs_remove_free_space_cache(cache);
2191 trace_btrfs_add_block_group(info, cache, 0);
2192 btrfs_add_bg_to_space_info(info, cache);
2194 set_avail_alloc_bits(info, cache->flags);
2195 if (btrfs_chunk_writeable(info, cache->start)) {
2196 if (cache->used == 0) {
2197 ASSERT(list_empty(&cache->bg_list));
2198 if (btrfs_test_opt(info, DISCARD_ASYNC))
2199 btrfs_discard_queue_work(&info->discard_ctl, cache);
2201 btrfs_mark_bg_unused(cache);
2204 inc_block_group_ro(cache, 1);
2209 btrfs_put_block_group(cache);
2213 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
2215 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
2216 struct rb_node *node;
2219 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
2220 struct extent_map *em;
2221 struct map_lookup *map;
2222 struct btrfs_block_group *bg;
2224 em = rb_entry(node, struct extent_map, rb_node);
2225 map = em->map_lookup;
2226 bg = btrfs_create_block_group_cache(fs_info, em->start);
2232 /* Fill dummy cache as FULL */
2233 bg->length = em->len;
2234 bg->flags = map->type;
2235 bg->cached = BTRFS_CACHE_FINISHED;
2237 bg->flags = map->type;
2238 ret = btrfs_add_block_group_cache(fs_info, bg);
2240 * We may have some valid block group cache added already, in
2241 * that case we skip to the next one.
2243 if (ret == -EEXIST) {
2245 btrfs_put_block_group(bg);
2250 btrfs_remove_free_space_cache(bg);
2251 btrfs_put_block_group(bg);
2255 btrfs_add_bg_to_space_info(fs_info, bg);
2257 set_avail_alloc_bits(fs_info, bg->flags);
2260 btrfs_init_global_block_rsv(fs_info);
2264 int btrfs_read_block_groups(struct btrfs_fs_info *info)
2266 struct btrfs_root *root = btrfs_block_group_root(info);
2267 struct btrfs_path *path;
2269 struct btrfs_block_group *cache;
2270 struct btrfs_space_info *space_info;
2271 struct btrfs_key key;
2276 * Either no extent root (with ibadroots rescue option) or we have
2277 * unsupported RO options. The fs can never be mounted read-write, so no
2278 * need to waste time searching block group items.
2280 * This also allows new extent tree related changes to be RO compat,
2281 * no need for a full incompat flag.
2283 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
2284 ~BTRFS_FEATURE_COMPAT_RO_SUPP))
2285 return fill_dummy_bgs(info);
2289 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2290 path = btrfs_alloc_path();
2294 cache_gen = btrfs_super_cache_generation(info->super_copy);
2295 if (btrfs_test_opt(info, SPACE_CACHE) &&
2296 btrfs_super_generation(info->super_copy) != cache_gen)
2298 if (btrfs_test_opt(info, CLEAR_CACHE))
2302 struct btrfs_block_group_item bgi;
2303 struct extent_buffer *leaf;
2306 ret = find_first_block_group(info, path, &key);
2312 leaf = path->nodes[0];
2313 slot = path->slots[0];
2315 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
2318 btrfs_item_key_to_cpu(leaf, &key, slot);
2319 btrfs_release_path(path);
2320 ret = read_one_block_group(info, &bgi, &key, need_clear);
2323 key.objectid += key.offset;
2326 btrfs_release_path(path);
2328 list_for_each_entry(space_info, &info->space_info, list) {
2331 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2332 if (list_empty(&space_info->block_groups[i]))
2334 cache = list_first_entry(&space_info->block_groups[i],
2335 struct btrfs_block_group,
2337 btrfs_sysfs_add_block_group_type(cache);
2340 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
2341 (BTRFS_BLOCK_GROUP_RAID10 |
2342 BTRFS_BLOCK_GROUP_RAID1_MASK |
2343 BTRFS_BLOCK_GROUP_RAID56_MASK |
2344 BTRFS_BLOCK_GROUP_DUP)))
2347 * Avoid allocating from un-mirrored block group if there are
2348 * mirrored block groups.
2350 list_for_each_entry(cache,
2351 &space_info->block_groups[BTRFS_RAID_RAID0],
2353 inc_block_group_ro(cache, 1);
2354 list_for_each_entry(cache,
2355 &space_info->block_groups[BTRFS_RAID_SINGLE],
2357 inc_block_group_ro(cache, 1);
2360 btrfs_init_global_block_rsv(info);
2361 ret = check_chunk_block_group_mappings(info);
2363 btrfs_free_path(path);
2365 * We've hit some error while reading the extent tree, and have
2366 * rescue=ibadroots mount option.
2367 * Try to fill the tree using dummy block groups so that the user can
2368 * continue to mount and grab their data.
2370 if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
2371 ret = fill_dummy_bgs(info);
2376 * This function, insert_block_group_item(), belongs to the phase 2 of chunk
2379 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2382 static int insert_block_group_item(struct btrfs_trans_handle *trans,
2383 struct btrfs_block_group *block_group)
2385 struct btrfs_fs_info *fs_info = trans->fs_info;
2386 struct btrfs_block_group_item bgi;
2387 struct btrfs_root *root = btrfs_block_group_root(fs_info);
2388 struct btrfs_key key;
2390 spin_lock(&block_group->lock);
2391 btrfs_set_stack_block_group_used(&bgi, block_group->used);
2392 btrfs_set_stack_block_group_chunk_objectid(&bgi,
2393 block_group->global_root_id);
2394 btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
2395 key.objectid = block_group->start;
2396 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2397 key.offset = block_group->length;
2398 spin_unlock(&block_group->lock);
2400 return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
2403 static int insert_dev_extent(struct btrfs_trans_handle *trans,
2404 struct btrfs_device *device, u64 chunk_offset,
2405 u64 start, u64 num_bytes)
2407 struct btrfs_fs_info *fs_info = device->fs_info;
2408 struct btrfs_root *root = fs_info->dev_root;
2409 struct btrfs_path *path;
2410 struct btrfs_dev_extent *extent;
2411 struct extent_buffer *leaf;
2412 struct btrfs_key key;
2415 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
2416 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
2417 path = btrfs_alloc_path();
2421 key.objectid = device->devid;
2422 key.type = BTRFS_DEV_EXTENT_KEY;
2424 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2428 leaf = path->nodes[0];
2429 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
2430 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
2431 btrfs_set_dev_extent_chunk_objectid(leaf, extent,
2432 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2433 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
2435 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
2436 btrfs_mark_buffer_dirty(leaf);
2438 btrfs_free_path(path);
2443 * This function belongs to phase 2.
2445 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2448 static int insert_dev_extents(struct btrfs_trans_handle *trans,
2449 u64 chunk_offset, u64 chunk_size)
2451 struct btrfs_fs_info *fs_info = trans->fs_info;
2452 struct btrfs_device *device;
2453 struct extent_map *em;
2454 struct map_lookup *map;
2460 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
2464 map = em->map_lookup;
2465 stripe_size = em->orig_block_len;
2468 * Take the device list mutex to prevent races with the final phase of
2469 * a device replace operation that replaces the device object associated
2470 * with the map's stripes, because the device object's id can change
2471 * at any time during that final phase of the device replace operation
2472 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
2473 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
2474 * resulting in persisting a device extent item with such ID.
2476 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2477 for (i = 0; i < map->num_stripes; i++) {
2478 device = map->stripes[i].dev;
2479 dev_offset = map->stripes[i].physical;
2481 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
2486 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2488 free_extent_map(em);
2493 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
2496 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2499 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2501 struct btrfs_fs_info *fs_info = trans->fs_info;
2502 struct btrfs_block_group *block_group;
2505 while (!list_empty(&trans->new_bgs)) {
2508 block_group = list_first_entry(&trans->new_bgs,
2509 struct btrfs_block_group,
2514 index = btrfs_bg_flags_to_raid_index(block_group->flags);
2516 ret = insert_block_group_item(trans, block_group);
2518 btrfs_abort_transaction(trans, ret);
2519 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
2520 &block_group->runtime_flags)) {
2521 mutex_lock(&fs_info->chunk_mutex);
2522 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
2523 mutex_unlock(&fs_info->chunk_mutex);
2525 btrfs_abort_transaction(trans, ret);
2527 ret = insert_dev_extents(trans, block_group->start,
2528 block_group->length);
2530 btrfs_abort_transaction(trans, ret);
2531 add_block_group_free_space(trans, block_group);
2534 * If we restriped during balance, we may have added a new raid
2535 * type, so now add the sysfs entries when it is safe to do so.
2536 * We don't have to worry about locking here as it's handled in
2537 * btrfs_sysfs_add_block_group_type.
2539 if (block_group->space_info->block_group_kobjs[index] == NULL)
2540 btrfs_sysfs_add_block_group_type(block_group);
2542 /* Already aborted the transaction if it failed. */
2544 btrfs_delayed_refs_rsv_release(fs_info, 1);
2545 list_del_init(&block_group->bg_list);
2546 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
2548 btrfs_trans_release_chunk_metadata(trans);
2552 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2553 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
2555 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
2560 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2561 return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2563 /* If we have a smaller fs index based on 128MiB. */
2564 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
2567 offset = div64_u64(offset, div);
2568 div64_u64_rem(offset, fs_info->nr_global_roots, &index);
2572 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
2573 u64 bytes_used, u64 type,
2574 u64 chunk_offset, u64 size)
2576 struct btrfs_fs_info *fs_info = trans->fs_info;
2577 struct btrfs_block_group *cache;
2580 btrfs_set_log_full_commit(trans);
2582 cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2584 return ERR_PTR(-ENOMEM);
2587 * Mark it as new before adding it to the rbtree of block groups or any
2588 * list, so that no other task finds it and calls btrfs_mark_bg_unused()
2589 * before the new flag is set.
2591 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
2593 cache->length = size;
2594 set_free_space_tree_thresholds(cache);
2595 cache->used = bytes_used;
2596 cache->flags = type;
2597 cache->cached = BTRFS_CACHE_FINISHED;
2598 cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2600 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
2601 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2603 ret = btrfs_load_block_group_zone_info(cache, true);
2605 btrfs_put_block_group(cache);
2606 return ERR_PTR(ret);
2609 ret = exclude_super_stripes(cache);
2611 /* We may have excluded something, so call this just in case */
2612 btrfs_free_excluded_extents(cache);
2613 btrfs_put_block_group(cache);
2614 return ERR_PTR(ret);
2617 ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
2618 btrfs_free_excluded_extents(cache);
2620 btrfs_put_block_group(cache);
2621 return ERR_PTR(ret);
2625 * Ensure the corresponding space_info object is created and
2626 * assigned to our block group. We want our bg to be added to the rbtree
2627 * with its ->space_info set.
2629 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2630 ASSERT(cache->space_info);
2632 ret = btrfs_add_block_group_cache(fs_info, cache);
2634 btrfs_remove_free_space_cache(cache);
2635 btrfs_put_block_group(cache);
2636 return ERR_PTR(ret);
2640 * Now that our block group has its ->space_info set and is inserted in
2641 * the rbtree, update the space info's counters.
2643 trace_btrfs_add_block_group(fs_info, cache, 1);
2644 btrfs_add_bg_to_space_info(fs_info, cache);
2645 btrfs_update_global_block_rsv(fs_info);
2647 #ifdef CONFIG_BTRFS_DEBUG
2648 if (btrfs_should_fragment_free_space(cache)) {
2649 u64 new_bytes_used = size - bytes_used;
2651 cache->space_info->bytes_used += new_bytes_used >> 1;
2652 fragment_free_space(cache);
2656 list_add_tail(&cache->bg_list, &trans->new_bgs);
2657 trans->delayed_ref_updates++;
2658 btrfs_update_delayed_refs_rsv(trans);
2660 set_avail_alloc_bits(fs_info, type);
2665 * Mark one block group RO, can be called several times for the same block
2668 * @cache: the destination block group
2669 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to
2670 * ensure we still have some free space after marking this
2673 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2674 bool do_chunk_alloc)
2676 struct btrfs_fs_info *fs_info = cache->fs_info;
2677 struct btrfs_trans_handle *trans;
2678 struct btrfs_root *root = btrfs_block_group_root(fs_info);
2681 bool dirty_bg_running;
2684 * This can only happen when we are doing read-only scrub on read-only
2686 * In that case we should not start a new transaction on read-only fs.
2687 * Thus here we skip all chunk allocations.
2689 if (sb_rdonly(fs_info->sb)) {
2690 mutex_lock(&fs_info->ro_block_group_mutex);
2691 ret = inc_block_group_ro(cache, 0);
2692 mutex_unlock(&fs_info->ro_block_group_mutex);
2697 trans = btrfs_join_transaction(root);
2699 return PTR_ERR(trans);
2701 dirty_bg_running = false;
2704 * We're not allowed to set block groups readonly after the dirty
2705 * block group cache has started writing. If it already started,
2706 * back off and let this transaction commit.
2708 mutex_lock(&fs_info->ro_block_group_mutex);
2709 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2710 u64 transid = trans->transid;
2712 mutex_unlock(&fs_info->ro_block_group_mutex);
2713 btrfs_end_transaction(trans);
2715 ret = btrfs_wait_for_commit(fs_info, transid);
2718 dirty_bg_running = true;
2720 } while (dirty_bg_running);
2722 if (do_chunk_alloc) {
2724 * If we are changing raid levels, try to allocate a
2725 * corresponding block group with the new raid level.
2727 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2728 if (alloc_flags != cache->flags) {
2729 ret = btrfs_chunk_alloc(trans, alloc_flags,
2732 * ENOSPC is allowed here, we may have enough space
2733 * already allocated at the new raid level to carry on
2742 ret = inc_block_group_ro(cache, 0);
2745 if (ret == -ETXTBSY)
2749 * Skip chunk alloction if the bg is SYSTEM, this is to avoid system
2750 * chunk allocation storm to exhaust the system chunk array. Otherwise
2751 * we still want to try our best to mark the block group read-only.
2753 if (!do_chunk_alloc && ret == -ENOSPC &&
2754 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
2757 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2758 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2762 * We have allocated a new chunk. We also need to activate that chunk to
2763 * grant metadata tickets for zoned filesystem.
2765 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
2769 ret = inc_block_group_ro(cache, 0);
2770 if (ret == -ETXTBSY)
2773 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2774 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2775 mutex_lock(&fs_info->chunk_mutex);
2776 check_system_chunk(trans, alloc_flags);
2777 mutex_unlock(&fs_info->chunk_mutex);
2780 mutex_unlock(&fs_info->ro_block_group_mutex);
2782 btrfs_end_transaction(trans);
2786 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2788 struct btrfs_space_info *sinfo = cache->space_info;
2793 spin_lock(&sinfo->lock);
2794 spin_lock(&cache->lock);
2796 if (btrfs_is_zoned(cache->fs_info)) {
2797 /* Migrate zone_unusable bytes back */
2798 cache->zone_unusable =
2799 (cache->alloc_offset - cache->used) +
2800 (cache->length - cache->zone_capacity);
2801 sinfo->bytes_zone_unusable += cache->zone_unusable;
2802 sinfo->bytes_readonly -= cache->zone_unusable;
2804 num_bytes = cache->length - cache->reserved -
2805 cache->pinned - cache->bytes_super -
2806 cache->zone_unusable - cache->used;
2807 sinfo->bytes_readonly -= num_bytes;
2808 list_del_init(&cache->ro_list);
2810 spin_unlock(&cache->lock);
2811 spin_unlock(&sinfo->lock);
2814 static int update_block_group_item(struct btrfs_trans_handle *trans,
2815 struct btrfs_path *path,
2816 struct btrfs_block_group *cache)
2818 struct btrfs_fs_info *fs_info = trans->fs_info;
2820 struct btrfs_root *root = btrfs_block_group_root(fs_info);
2822 struct extent_buffer *leaf;
2823 struct btrfs_block_group_item bgi;
2824 struct btrfs_key key;
2826 key.objectid = cache->start;
2827 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2828 key.offset = cache->length;
2830 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2837 leaf = path->nodes[0];
2838 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2839 btrfs_set_stack_block_group_used(&bgi, cache->used);
2840 btrfs_set_stack_block_group_chunk_objectid(&bgi,
2841 cache->global_root_id);
2842 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
2843 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
2844 btrfs_mark_buffer_dirty(leaf);
2846 btrfs_release_path(path);
2851 static int cache_save_setup(struct btrfs_block_group *block_group,
2852 struct btrfs_trans_handle *trans,
2853 struct btrfs_path *path)
2855 struct btrfs_fs_info *fs_info = block_group->fs_info;
2856 struct btrfs_root *root = fs_info->tree_root;
2857 struct inode *inode = NULL;
2858 struct extent_changeset *data_reserved = NULL;
2860 int dcs = BTRFS_DC_ERROR;
2865 if (!btrfs_test_opt(fs_info, SPACE_CACHE))
2869 * If this block group is smaller than 100 megs don't bother caching the
2872 if (block_group->length < (100 * SZ_1M)) {
2873 spin_lock(&block_group->lock);
2874 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2875 spin_unlock(&block_group->lock);
2879 if (TRANS_ABORTED(trans))
2882 inode = lookup_free_space_inode(block_group, path);
2883 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2884 ret = PTR_ERR(inode);
2885 btrfs_release_path(path);
2889 if (IS_ERR(inode)) {
2893 if (block_group->ro)
2896 ret = create_free_space_inode(trans, block_group, path);
2903 * We want to set the generation to 0, that way if anything goes wrong
2904 * from here on out we know not to trust this cache when we load up next
2907 BTRFS_I(inode)->generation = 0;
2908 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2911 * So theoretically we could recover from this, simply set the
2912 * super cache generation to 0 so we know to invalidate the
2913 * cache, but then we'd have to keep track of the block groups
2914 * that fail this way so we know we _have_ to reset this cache
2915 * before the next commit or risk reading stale cache. So to
2916 * limit our exposure to horrible edge cases lets just abort the
2917 * transaction, this only happens in really bad situations
2920 btrfs_abort_transaction(trans, ret);
2925 /* We've already setup this transaction, go ahead and exit */
2926 if (block_group->cache_generation == trans->transid &&
2927 i_size_read(inode)) {
2928 dcs = BTRFS_DC_SETUP;
2932 if (i_size_read(inode) > 0) {
2933 ret = btrfs_check_trunc_cache_free_space(fs_info,
2934 &fs_info->global_block_rsv);
2938 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2943 spin_lock(&block_group->lock);
2944 if (block_group->cached != BTRFS_CACHE_FINISHED ||
2945 !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2947 * don't bother trying to write stuff out _if_
2948 * a) we're not cached,
2949 * b) we're with nospace_cache mount option,
2950 * c) we're with v2 space_cache (FREE_SPACE_TREE).
2952 dcs = BTRFS_DC_WRITTEN;
2953 spin_unlock(&block_group->lock);
2956 spin_unlock(&block_group->lock);
2959 * We hit an ENOSPC when setting up the cache in this transaction, just
2960 * skip doing the setup, we've already cleared the cache so we're safe.
2962 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2968 * Try to preallocate enough space based on how big the block group is.
2969 * Keep in mind this has to include any pinned space which could end up
2970 * taking up quite a bit since it's not folded into the other space
2973 cache_size = div_u64(block_group->length, SZ_256M);
2978 cache_size *= fs_info->sectorsize;
2980 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
2985 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
2986 cache_size, cache_size,
2989 * Our cache requires contiguous chunks so that we don't modify a bunch
2990 * of metadata or split extents when writing the cache out, which means
2991 * we can enospc if we are heavily fragmented in addition to just normal
2992 * out of space conditions. So if we hit this just skip setting up any
2993 * other block groups for this transaction, maybe we'll unpin enough
2994 * space the next time around.
2997 dcs = BTRFS_DC_SETUP;
2998 else if (ret == -ENOSPC)
2999 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3004 btrfs_release_path(path);
3006 spin_lock(&block_group->lock);
3007 if (!ret && dcs == BTRFS_DC_SETUP)
3008 block_group->cache_generation = trans->transid;
3009 block_group->disk_cache_state = dcs;
3010 spin_unlock(&block_group->lock);
3012 extent_changeset_free(data_reserved);
3016 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
3018 struct btrfs_fs_info *fs_info = trans->fs_info;
3019 struct btrfs_block_group *cache, *tmp;
3020 struct btrfs_transaction *cur_trans = trans->transaction;
3021 struct btrfs_path *path;
3023 if (list_empty(&cur_trans->dirty_bgs) ||
3024 !btrfs_test_opt(fs_info, SPACE_CACHE))
3027 path = btrfs_alloc_path();
3031 /* Could add new block groups, use _safe just in case */
3032 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3034 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3035 cache_save_setup(cache, trans, path);
3038 btrfs_free_path(path);
3043 * Transaction commit does final block group cache writeback during a critical
3044 * section where nothing is allowed to change the FS. This is required in
3045 * order for the cache to actually match the block group, but can introduce a
3046 * lot of latency into the commit.
3048 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3049 * There's a chance we'll have to redo some of it if the block group changes
3050 * again during the commit, but it greatly reduces the commit latency by
3051 * getting rid of the easy block groups while we're still allowing others to
3054 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3056 struct btrfs_fs_info *fs_info = trans->fs_info;
3057 struct btrfs_block_group *cache;
3058 struct btrfs_transaction *cur_trans = trans->transaction;
3061 struct btrfs_path *path = NULL;
3063 struct list_head *io = &cur_trans->io_bgs;
3066 spin_lock(&cur_trans->dirty_bgs_lock);
3067 if (list_empty(&cur_trans->dirty_bgs)) {
3068 spin_unlock(&cur_trans->dirty_bgs_lock);
3071 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3072 spin_unlock(&cur_trans->dirty_bgs_lock);
3075 /* Make sure all the block groups on our dirty list actually exist */
3076 btrfs_create_pending_block_groups(trans);
3079 path = btrfs_alloc_path();
3087 * cache_write_mutex is here only to save us from balance or automatic
3088 * removal of empty block groups deleting this block group while we are
3089 * writing out the cache
3091 mutex_lock(&trans->transaction->cache_write_mutex);
3092 while (!list_empty(&dirty)) {
3093 bool drop_reserve = true;
3095 cache = list_first_entry(&dirty, struct btrfs_block_group,
3098 * This can happen if something re-dirties a block group that
3099 * is already under IO. Just wait for it to finish and then do
3102 if (!list_empty(&cache->io_list)) {
3103 list_del_init(&cache->io_list);
3104 btrfs_wait_cache_io(trans, cache, path);
3105 btrfs_put_block_group(cache);
3110 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3111 * it should update the cache_state. Don't delete until after
3114 * Since we're not running in the commit critical section
3115 * we need the dirty_bgs_lock to protect from update_block_group
3117 spin_lock(&cur_trans->dirty_bgs_lock);
3118 list_del_init(&cache->dirty_list);
3119 spin_unlock(&cur_trans->dirty_bgs_lock);
3123 cache_save_setup(cache, trans, path);
3125 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3126 cache->io_ctl.inode = NULL;
3127 ret = btrfs_write_out_cache(trans, cache, path);
3128 if (ret == 0 && cache->io_ctl.inode) {
3132 * The cache_write_mutex is protecting the
3133 * io_list, also refer to the definition of
3134 * btrfs_transaction::io_bgs for more details
3136 list_add_tail(&cache->io_list, io);
3139 * If we failed to write the cache, the
3140 * generation will be bad and life goes on
3146 ret = update_block_group_item(trans, path, cache);
3148 * Our block group might still be attached to the list
3149 * of new block groups in the transaction handle of some
3150 * other task (struct btrfs_trans_handle->new_bgs). This
3151 * means its block group item isn't yet in the extent
3152 * tree. If this happens ignore the error, as we will
3153 * try again later in the critical section of the
3154 * transaction commit.
3156 if (ret == -ENOENT) {
3158 spin_lock(&cur_trans->dirty_bgs_lock);
3159 if (list_empty(&cache->dirty_list)) {
3160 list_add_tail(&cache->dirty_list,
3161 &cur_trans->dirty_bgs);
3162 btrfs_get_block_group(cache);
3163 drop_reserve = false;
3165 spin_unlock(&cur_trans->dirty_bgs_lock);
3167 btrfs_abort_transaction(trans, ret);
3171 /* If it's not on the io list, we need to put the block group */
3173 btrfs_put_block_group(cache);
3175 btrfs_delayed_refs_rsv_release(fs_info, 1);
3177 * Avoid blocking other tasks for too long. It might even save
3178 * us from writing caches for block groups that are going to be
3181 mutex_unlock(&trans->transaction->cache_write_mutex);
3184 mutex_lock(&trans->transaction->cache_write_mutex);
3186 mutex_unlock(&trans->transaction->cache_write_mutex);
3189 * Go through delayed refs for all the stuff we've just kicked off
3190 * and then loop back (just once)
3193 ret = btrfs_run_delayed_refs(trans, 0);
3194 if (!ret && loops == 0) {
3196 spin_lock(&cur_trans->dirty_bgs_lock);
3197 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3199 * dirty_bgs_lock protects us from concurrent block group
3200 * deletes too (not just cache_write_mutex).
3202 if (!list_empty(&dirty)) {
3203 spin_unlock(&cur_trans->dirty_bgs_lock);
3206 spin_unlock(&cur_trans->dirty_bgs_lock);
3210 spin_lock(&cur_trans->dirty_bgs_lock);
3211 list_splice_init(&dirty, &cur_trans->dirty_bgs);
3212 spin_unlock(&cur_trans->dirty_bgs_lock);
3213 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3216 btrfs_free_path(path);
3220 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
3222 struct btrfs_fs_info *fs_info = trans->fs_info;
3223 struct btrfs_block_group *cache;
3224 struct btrfs_transaction *cur_trans = trans->transaction;
3227 struct btrfs_path *path;
3228 struct list_head *io = &cur_trans->io_bgs;
3230 path = btrfs_alloc_path();
3235 * Even though we are in the critical section of the transaction commit,
3236 * we can still have concurrent tasks adding elements to this
3237 * transaction's list of dirty block groups. These tasks correspond to
3238 * endio free space workers started when writeback finishes for a
3239 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3240 * allocate new block groups as a result of COWing nodes of the root
3241 * tree when updating the free space inode. The writeback for the space
3242 * caches is triggered by an earlier call to
3243 * btrfs_start_dirty_block_groups() and iterations of the following
3245 * Also we want to do the cache_save_setup first and then run the
3246 * delayed refs to make sure we have the best chance at doing this all
3249 spin_lock(&cur_trans->dirty_bgs_lock);
3250 while (!list_empty(&cur_trans->dirty_bgs)) {
3251 cache = list_first_entry(&cur_trans->dirty_bgs,
3252 struct btrfs_block_group,
3256 * This can happen if cache_save_setup re-dirties a block group
3257 * that is already under IO. Just wait for it to finish and
3258 * then do it all again
3260 if (!list_empty(&cache->io_list)) {
3261 spin_unlock(&cur_trans->dirty_bgs_lock);
3262 list_del_init(&cache->io_list);
3263 btrfs_wait_cache_io(trans, cache, path);
3264 btrfs_put_block_group(cache);
3265 spin_lock(&cur_trans->dirty_bgs_lock);
3269 * Don't remove from the dirty list until after we've waited on
3272 list_del_init(&cache->dirty_list);
3273 spin_unlock(&cur_trans->dirty_bgs_lock);
3276 cache_save_setup(cache, trans, path);
3279 ret = btrfs_run_delayed_refs(trans,
3280 (unsigned long) -1);
3282 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3283 cache->io_ctl.inode = NULL;
3284 ret = btrfs_write_out_cache(trans, cache, path);
3285 if (ret == 0 && cache->io_ctl.inode) {
3287 list_add_tail(&cache->io_list, io);
3290 * If we failed to write the cache, the
3291 * generation will be bad and life goes on
3297 ret = update_block_group_item(trans, path, cache);
3299 * One of the free space endio workers might have
3300 * created a new block group while updating a free space
3301 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3302 * and hasn't released its transaction handle yet, in
3303 * which case the new block group is still attached to
3304 * its transaction handle and its creation has not
3305 * finished yet (no block group item in the extent tree
3306 * yet, etc). If this is the case, wait for all free
3307 * space endio workers to finish and retry. This is a
3308 * very rare case so no need for a more efficient and
3311 if (ret == -ENOENT) {
3312 wait_event(cur_trans->writer_wait,
3313 atomic_read(&cur_trans->num_writers) == 1);
3314 ret = update_block_group_item(trans, path, cache);
3317 btrfs_abort_transaction(trans, ret);
3320 /* If its not on the io list, we need to put the block group */
3322 btrfs_put_block_group(cache);
3323 btrfs_delayed_refs_rsv_release(fs_info, 1);
3324 spin_lock(&cur_trans->dirty_bgs_lock);
3326 spin_unlock(&cur_trans->dirty_bgs_lock);
3329 * Refer to the definition of io_bgs member for details why it's safe
3330 * to use it without any locking
3332 while (!list_empty(io)) {
3333 cache = list_first_entry(io, struct btrfs_block_group,
3335 list_del_init(&cache->io_list);
3336 btrfs_wait_cache_io(trans, cache, path);
3337 btrfs_put_block_group(cache);
3340 btrfs_free_path(path);
3344 static inline bool should_reclaim_block_group(struct btrfs_block_group *bg,
3347 const struct btrfs_space_info *space_info = bg->space_info;
3348 const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
3349 const u64 new_val = bg->used;
3350 const u64 old_val = new_val + bytes_freed;
3353 if (reclaim_thresh == 0)
3356 thresh = div_factor_fine(bg->length, reclaim_thresh);
3359 * If we were below the threshold before don't reclaim, we are likely a
3360 * brand new block group and we don't want to relocate new block groups.
3362 if (old_val < thresh)
3364 if (new_val >= thresh)
3369 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3370 u64 bytenr, u64 num_bytes, bool alloc)
3372 struct btrfs_fs_info *info = trans->fs_info;
3373 struct btrfs_block_group *cache = NULL;
3374 u64 total = num_bytes;
3380 /* Block accounting for super block */
3381 spin_lock(&info->delalloc_root_lock);
3382 old_val = btrfs_super_bytes_used(info->super_copy);
3384 old_val += num_bytes;
3386 old_val -= num_bytes;
3387 btrfs_set_super_bytes_used(info->super_copy, old_val);
3388 spin_unlock(&info->delalloc_root_lock);
3391 struct btrfs_space_info *space_info;
3392 bool reclaim = false;
3394 cache = btrfs_lookup_block_group(info, bytenr);
3399 space_info = cache->space_info;
3400 factor = btrfs_bg_type_to_factor(cache->flags);
3403 * If this block group has free space cache written out, we
3404 * need to make sure to load it if we are removing space. This
3405 * is because we need the unpinning stage to actually add the
3406 * space back to the block group, otherwise we will leak space.
3408 if (!alloc && !btrfs_block_group_done(cache))
3409 btrfs_cache_block_group(cache, true);
3411 byte_in_group = bytenr - cache->start;
3412 WARN_ON(byte_in_group > cache->length);
3414 spin_lock(&space_info->lock);
3415 spin_lock(&cache->lock);
3417 if (btrfs_test_opt(info, SPACE_CACHE) &&
3418 cache->disk_cache_state < BTRFS_DC_CLEAR)
3419 cache->disk_cache_state = BTRFS_DC_CLEAR;
3421 old_val = cache->used;
3422 num_bytes = min(total, cache->length - byte_in_group);
3424 old_val += num_bytes;
3425 cache->used = old_val;
3426 cache->reserved -= num_bytes;
3427 space_info->bytes_reserved -= num_bytes;
3428 space_info->bytes_used += num_bytes;
3429 space_info->disk_used += num_bytes * factor;
3430 spin_unlock(&cache->lock);
3431 spin_unlock(&space_info->lock);
3433 old_val -= num_bytes;
3434 cache->used = old_val;
3435 cache->pinned += num_bytes;
3436 btrfs_space_info_update_bytes_pinned(info, space_info,
3438 space_info->bytes_used -= num_bytes;
3439 space_info->disk_used -= num_bytes * factor;
3441 reclaim = should_reclaim_block_group(cache, num_bytes);
3442 spin_unlock(&cache->lock);
3443 spin_unlock(&space_info->lock);
3445 set_extent_dirty(&trans->transaction->pinned_extents,
3446 bytenr, bytenr + num_bytes - 1,
3447 GFP_NOFS | __GFP_NOFAIL);
3450 spin_lock(&trans->transaction->dirty_bgs_lock);
3451 if (list_empty(&cache->dirty_list)) {
3452 list_add_tail(&cache->dirty_list,
3453 &trans->transaction->dirty_bgs);
3454 trans->delayed_ref_updates++;
3455 btrfs_get_block_group(cache);
3457 spin_unlock(&trans->transaction->dirty_bgs_lock);
3460 * No longer have used bytes in this block group, queue it for
3461 * deletion. We do this after adding the block group to the
3462 * dirty list to avoid races between cleaner kthread and space
3465 if (!alloc && old_val == 0) {
3466 if (!btrfs_test_opt(info, DISCARD_ASYNC))
3467 btrfs_mark_bg_unused(cache);
3468 } else if (!alloc && reclaim) {
3469 btrfs_mark_bg_to_reclaim(cache);
3472 btrfs_put_block_group(cache);
3474 bytenr += num_bytes;
3477 /* Modified block groups are accounted for in the delayed_refs_rsv. */
3478 btrfs_update_delayed_refs_rsv(trans);
3483 * btrfs_add_reserved_bytes - update the block_group and space info counters
3484 * @cache: The cache we are manipulating
3485 * @ram_bytes: The number of bytes of file content, and will be same to
3486 * @num_bytes except for the compress path.
3487 * @num_bytes: The number of bytes in question
3488 * @delalloc: The blocks are allocated for the delalloc write
3490 * This is called by the allocator when it reserves space. If this is a
3491 * reservation and the block group has become read only we cannot make the
3492 * reservation and return -EAGAIN, otherwise this function always succeeds.
3494 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3495 u64 ram_bytes, u64 num_bytes, int delalloc)
3497 struct btrfs_space_info *space_info = cache->space_info;
3500 spin_lock(&space_info->lock);
3501 spin_lock(&cache->lock);
3505 cache->reserved += num_bytes;
3506 space_info->bytes_reserved += num_bytes;
3507 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3508 space_info->flags, num_bytes, 1);
3509 btrfs_space_info_update_bytes_may_use(cache->fs_info,
3510 space_info, -ram_bytes);
3512 cache->delalloc_bytes += num_bytes;
3515 * Compression can use less space than we reserved, so wake
3516 * tickets if that happens
3518 if (num_bytes < ram_bytes)
3519 btrfs_try_granting_tickets(cache->fs_info, space_info);
3521 spin_unlock(&cache->lock);
3522 spin_unlock(&space_info->lock);
3527 * btrfs_free_reserved_bytes - update the block_group and space info counters
3528 * @cache: The cache we are manipulating
3529 * @num_bytes: The number of bytes in question
3530 * @delalloc: The blocks are allocated for the delalloc write
3532 * This is called by somebody who is freeing space that was never actually used
3533 * on disk. For example if you reserve some space for a new leaf in transaction
3534 * A and before transaction A commits you free that leaf, you call this with
3535 * reserve set to 0 in order to clear the reservation.
3537 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3538 u64 num_bytes, int delalloc)
3540 struct btrfs_space_info *space_info = cache->space_info;
3542 spin_lock(&space_info->lock);
3543 spin_lock(&cache->lock);
3545 space_info->bytes_readonly += num_bytes;
3546 cache->reserved -= num_bytes;
3547 space_info->bytes_reserved -= num_bytes;
3548 space_info->max_extent_size = 0;
3551 cache->delalloc_bytes -= num_bytes;
3552 spin_unlock(&cache->lock);
3554 btrfs_try_granting_tickets(cache->fs_info, space_info);
3555 spin_unlock(&space_info->lock);
3558 static void force_metadata_allocation(struct btrfs_fs_info *info)
3560 struct list_head *head = &info->space_info;
3561 struct btrfs_space_info *found;
3563 list_for_each_entry(found, head, list) {
3564 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3565 found->force_alloc = CHUNK_ALLOC_FORCE;
3569 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
3570 struct btrfs_space_info *sinfo, int force)
3572 u64 bytes_used = btrfs_space_info_used(sinfo, false);
3575 if (force == CHUNK_ALLOC_FORCE)
3579 * in limited mode, we want to have some free space up to
3580 * about 1% of the FS size.
3582 if (force == CHUNK_ALLOC_LIMITED) {
3583 thresh = btrfs_super_total_bytes(fs_info->super_copy);
3584 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
3586 if (sinfo->total_bytes - bytes_used < thresh)
3590 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
3595 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3597 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3599 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3602 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
3604 struct btrfs_block_group *bg;
3608 * Check if we have enough space in the system space info because we
3609 * will need to update device items in the chunk btree and insert a new
3610 * chunk item in the chunk btree as well. This will allocate a new
3611 * system block group if needed.
3613 check_system_chunk(trans, flags);
3615 bg = btrfs_create_chunk(trans, flags);
3621 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3623 * Normally we are not expected to fail with -ENOSPC here, since we have
3624 * previously reserved space in the system space_info and allocated one
3625 * new system chunk if necessary. However there are three exceptions:
3627 * 1) We may have enough free space in the system space_info but all the
3628 * existing system block groups have a profile which can not be used
3629 * for extent allocation.
3631 * This happens when mounting in degraded mode. For example we have a
3632 * RAID1 filesystem with 2 devices, lose one device and mount the fs
3633 * using the other device in degraded mode. If we then allocate a chunk,
3634 * we may have enough free space in the existing system space_info, but
3635 * none of the block groups can be used for extent allocation since they
3636 * have a RAID1 profile, and because we are in degraded mode with a
3637 * single device, we are forced to allocate a new system chunk with a
3638 * SINGLE profile. Making check_system_chunk() iterate over all system
3639 * block groups and check if they have a usable profile and enough space
3640 * can be slow on very large filesystems, so we tolerate the -ENOSPC and
3641 * try again after forcing allocation of a new system chunk. Like this
3642 * we avoid paying the cost of that search in normal circumstances, when
3643 * we were not mounted in degraded mode;
3645 * 2) We had enough free space info the system space_info, and one suitable
3646 * block group to allocate from when we called check_system_chunk()
3647 * above. However right after we called it, the only system block group
3648 * with enough free space got turned into RO mode by a running scrub,
3649 * and in this case we have to allocate a new one and retry. We only
3650 * need do this allocate and retry once, since we have a transaction
3651 * handle and scrub uses the commit root to search for block groups;
3653 * 3) We had one system block group with enough free space when we called
3654 * check_system_chunk(), but after that, right before we tried to
3655 * allocate the last extent buffer we needed, a discard operation came
3656 * in and it temporarily removed the last free space entry from the
3657 * block group (discard removes a free space entry, discards it, and
3658 * then adds back the entry to the block group cache).
3660 if (ret == -ENOSPC) {
3661 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
3662 struct btrfs_block_group *sys_bg;
3664 sys_bg = btrfs_create_chunk(trans, sys_flags);
3665 if (IS_ERR(sys_bg)) {
3666 ret = PTR_ERR(sys_bg);
3667 btrfs_abort_transaction(trans, ret);
3671 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3673 btrfs_abort_transaction(trans, ret);
3677 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3679 btrfs_abort_transaction(trans, ret);
3683 btrfs_abort_transaction(trans, ret);
3687 btrfs_trans_release_chunk_metadata(trans);
3690 return ERR_PTR(ret);
3692 btrfs_get_block_group(bg);
3697 * Chunk allocation is done in 2 phases:
3699 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3700 * the chunk, the chunk mapping, create its block group and add the items
3701 * that belong in the chunk btree to it - more specifically, we need to
3702 * update device items in the chunk btree and add a new chunk item to it.
3704 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3705 * group item to the extent btree and the device extent items to the devices
3708 * This is done to prevent deadlocks. For example when COWing a node from the
3709 * extent btree we are holding a write lock on the node's parent and if we
3710 * trigger chunk allocation and attempted to insert the new block group item
3711 * in the extent btree right way, we could deadlock because the path for the
3712 * insertion can include that parent node. At first glance it seems impossible
3713 * to trigger chunk allocation after starting a transaction since tasks should
3714 * reserve enough transaction units (metadata space), however while that is true
3715 * most of the time, chunk allocation may still be triggered for several reasons:
3717 * 1) When reserving metadata, we check if there is enough free space in the
3718 * metadata space_info and therefore don't trigger allocation of a new chunk.
3719 * However later when the task actually tries to COW an extent buffer from
3720 * the extent btree or from the device btree for example, it is forced to
3721 * allocate a new block group (chunk) because the only one that had enough
3722 * free space was just turned to RO mode by a running scrub for example (or
3723 * device replace, block group reclaim thread, etc), so we can not use it
3724 * for allocating an extent and end up being forced to allocate a new one;
3726 * 2) Because we only check that the metadata space_info has enough free bytes,
3727 * we end up not allocating a new metadata chunk in that case. However if
3728 * the filesystem was mounted in degraded mode, none of the existing block
3729 * groups might be suitable for extent allocation due to their incompatible
3730 * profile (for e.g. mounting a 2 devices filesystem, where all block groups
3731 * use a RAID1 profile, in degraded mode using a single device). In this case
3732 * when the task attempts to COW some extent buffer of the extent btree for
3733 * example, it will trigger allocation of a new metadata block group with a
3734 * suitable profile (SINGLE profile in the example of the degraded mount of
3735 * the RAID1 filesystem);
3737 * 3) The task has reserved enough transaction units / metadata space, but when
3738 * it attempts to COW an extent buffer from the extent or device btree for
3739 * example, it does not find any free extent in any metadata block group,
3740 * therefore forced to try to allocate a new metadata block group.
3741 * This is because some other task allocated all available extents in the
3742 * meanwhile - this typically happens with tasks that don't reserve space
3743 * properly, either intentionally or as a bug. One example where this is
3744 * done intentionally is fsync, as it does not reserve any transaction units
3745 * and ends up allocating a variable number of metadata extents for log
3746 * tree extent buffers;
3748 * 4) The task has reserved enough transaction units / metadata space, but right
3749 * before it tries to allocate the last extent buffer it needs, a discard
3750 * operation comes in and, temporarily, removes the last free space entry from
3751 * the only metadata block group that had free space (discard starts by
3752 * removing a free space entry from a block group, then does the discard
3753 * operation and, once it's done, it adds back the free space entry to the
3756 * We also need this 2 phases setup when adding a device to a filesystem with
3757 * a seed device - we must create new metadata and system chunks without adding
3758 * any of the block group items to the chunk, extent and device btrees. If we
3759 * did not do it this way, we would get ENOSPC when attempting to update those
3760 * btrees, since all the chunks from the seed device are read-only.
3762 * Phase 1 does the updates and insertions to the chunk btree because if we had
3763 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
3764 * parallel, we risk having too many system chunks allocated by many tasks if
3765 * many tasks reach phase 1 without the previous ones completing phase 2. In the
3766 * extreme case this leads to exhaustion of the system chunk array in the
3767 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
3768 * and with RAID filesystems (so we have more device items in the chunk btree).
3769 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
3770 * the system chunk array due to concurrent allocations") provides more details.
3772 * Allocation of system chunks does not happen through this function. A task that
3773 * needs to update the chunk btree (the only btree that uses system chunks), must
3774 * preallocate chunk space by calling either check_system_chunk() or
3775 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
3776 * metadata chunk or when removing a chunk, while the later is used before doing
3777 * a modification to the chunk btree - use cases for the later are adding,
3778 * removing and resizing a device as well as relocation of a system chunk.
3779 * See the comment below for more details.
3781 * The reservation of system space, done through check_system_chunk(), as well
3782 * as all the updates and insertions into the chunk btree must be done while
3783 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
3784 * an extent buffer from the chunks btree we never trigger allocation of a new
3785 * system chunk, which would result in a deadlock (trying to lock twice an
3786 * extent buffer of the chunk btree, first time before triggering the chunk
3787 * allocation and the second time during chunk allocation while attempting to
3788 * update the chunks btree). The system chunk array is also updated while holding
3789 * that mutex. The same logic applies to removing chunks - we must reserve system
3790 * space, update the chunk btree and the system chunk array in the superblock
3791 * while holding fs_info->chunk_mutex.
3793 * This function, btrfs_chunk_alloc(), belongs to phase 1.
3795 * If @force is CHUNK_ALLOC_FORCE:
3796 * - return 1 if it successfully allocates a chunk,
3797 * - return errors including -ENOSPC otherwise.
3798 * If @force is NOT CHUNK_ALLOC_FORCE:
3799 * - return 0 if it doesn't need to allocate a new chunk,
3800 * - return 1 if it successfully allocates a chunk,
3801 * - return errors including -ENOSPC otherwise.
3803 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
3804 enum btrfs_chunk_alloc_enum force)
3806 struct btrfs_fs_info *fs_info = trans->fs_info;
3807 struct btrfs_space_info *space_info;
3808 struct btrfs_block_group *ret_bg;
3809 bool wait_for_alloc = false;
3810 bool should_alloc = false;
3811 bool from_extent_allocation = false;
3814 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
3815 from_extent_allocation = true;
3816 force = CHUNK_ALLOC_FORCE;
3819 /* Don't re-enter if we're already allocating a chunk */
3820 if (trans->allocating_chunk)
3823 * Allocation of system chunks can not happen through this path, as we
3824 * could end up in a deadlock if we are allocating a data or metadata
3825 * chunk and there is another task modifying the chunk btree.
3827 * This is because while we are holding the chunk mutex, we will attempt
3828 * to add the new chunk item to the chunk btree or update an existing
3829 * device item in the chunk btree, while the other task that is modifying
3830 * the chunk btree is attempting to COW an extent buffer while holding a
3831 * lock on it and on its parent - if the COW operation triggers a system
3832 * chunk allocation, then we can deadlock because we are holding the
3833 * chunk mutex and we may need to access that extent buffer or its parent
3834 * in order to add the chunk item or update a device item.
3836 * Tasks that want to modify the chunk tree should reserve system space
3837 * before updating the chunk btree, by calling either
3838 * btrfs_reserve_chunk_metadata() or check_system_chunk().
3839 * It's possible that after a task reserves the space, it still ends up
3840 * here - this happens in the cases described above at do_chunk_alloc().
3841 * The task will have to either retry or fail.
3843 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3846 space_info = btrfs_find_space_info(fs_info, flags);
3850 spin_lock(&space_info->lock);
3851 if (force < space_info->force_alloc)
3852 force = space_info->force_alloc;
3853 should_alloc = should_alloc_chunk(fs_info, space_info, force);
3854 if (space_info->full) {
3855 /* No more free physical space */
3860 spin_unlock(&space_info->lock);
3862 } else if (!should_alloc) {
3863 spin_unlock(&space_info->lock);
3865 } else if (space_info->chunk_alloc) {
3867 * Someone is already allocating, so we need to block
3868 * until this someone is finished and then loop to
3869 * recheck if we should continue with our allocation
3872 wait_for_alloc = true;
3873 force = CHUNK_ALLOC_NO_FORCE;
3874 spin_unlock(&space_info->lock);
3875 mutex_lock(&fs_info->chunk_mutex);
3876 mutex_unlock(&fs_info->chunk_mutex);
3878 /* Proceed with allocation */
3879 space_info->chunk_alloc = 1;
3880 wait_for_alloc = false;
3881 spin_unlock(&space_info->lock);
3885 } while (wait_for_alloc);
3887 mutex_lock(&fs_info->chunk_mutex);
3888 trans->allocating_chunk = true;
3891 * If we have mixed data/metadata chunks we want to make sure we keep
3892 * allocating mixed chunks instead of individual chunks.
3894 if (btrfs_mixed_space_info(space_info))
3895 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3898 * if we're doing a data chunk, go ahead and make sure that
3899 * we keep a reasonable number of metadata chunks allocated in the
3902 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3903 fs_info->data_chunk_allocations++;
3904 if (!(fs_info->data_chunk_allocations %
3905 fs_info->metadata_ratio))
3906 force_metadata_allocation(fs_info);
3909 ret_bg = do_chunk_alloc(trans, flags);
3910 trans->allocating_chunk = false;
3912 if (IS_ERR(ret_bg)) {
3913 ret = PTR_ERR(ret_bg);
3914 } else if (from_extent_allocation) {
3916 * New block group is likely to be used soon. Try to activate
3917 * it now. Failure is OK for now.
3919 btrfs_zone_activate(ret_bg);
3923 btrfs_put_block_group(ret_bg);
3925 spin_lock(&space_info->lock);
3928 space_info->full = 1;
3933 space_info->max_extent_size = 0;
3936 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3938 space_info->chunk_alloc = 0;
3939 spin_unlock(&space_info->lock);
3940 mutex_unlock(&fs_info->chunk_mutex);
3945 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
3949 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
3951 num_dev = fs_info->fs_devices->rw_devices;
static void reserve_chunk_space(struct btrfs_trans_handle *trans,
				u64 bytes,
				u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	int ret = 0;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, bytes, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < bytes) {
		u64 flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *bg;

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		bg = btrfs_create_chunk(trans, flags);
		if (IS_ERR(bg)) {
			ret = PTR_ERR(bg);
		} else {
			/*
			 * We have a new chunk. We also need to activate it for
			 * a zoned filesystem.
			 */
			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
			if (ret < 0)
				return;

			/*
			 * If we fail to add the chunk item here, we end up
			 * trying again at phase 2 of chunk allocation, at
			 * btrfs_create_pending_block_groups(). So ignore
			 * any error here. An ENOSPC here could happen, due to
			 * the cases described at do_chunk_alloc() - the system
			 * block group we just created was just turned into RO
			 * mode by a scrub for example, or a running discard
			 * temporarily removed its free space entries, etc.
			 */
			btrfs_chunk_alloc_add_chunk_item(trans, bg);
		}
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info,
					  &fs_info->chunk_block_rsv,
					  bytes, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += bytes;
	}
}

/*
 * Reserve space in the system space for allocating or removing a chunk.
 * The caller must be holding fs_info->chunk_mutex.
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	const u64 num_devs = get_profile_num_devs(fs_info, type);
	u64 bytes;

	/* num_devs device items to update and 1 chunk item to add or remove. */
	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	reserve_chunk_space(trans, bytes, type);
}
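
/*
 * Worked example for the reservation above (a sketch assuming the usual
 * helper definitions: nodesize * BTRFS_MAX_LEVEL per item for updates and
 * twice that per item for insertions, to cover node splits): with a 16K
 * nodesize and a RAID1 system profile (num_devs == 2), check_system_chunk()
 * reserves 16K * 8 * 2 = 256K for the two device item updates plus
 * 16K * 8 * 2 = 256K for the chunk item insertion, 512K in total.
 */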

/*
 * Reserve space in the system space, if needed, for doing a modification to the
 * chunk btree.
 *
 * @trans:		A transaction handle.
 * @is_item_insertion:	Indicate if the modification is for inserting a new item
 *			in the chunk btree or if it's for the deletion or update
 *			of an existing item.
 *
 * This is used in a context where we need to update the chunk btree outside
 * block group allocation and removal, to avoid a deadlock with a concurrent
 * task that is allocating a metadata or data block group and therefore needs to
 * update the chunk btree while holding the chunk mutex. After the update to the
 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
 */
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 bytes;

	if (is_item_insertion)
		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	else
		bytes = btrfs_calc_metadata_size(fs_info, 1);

	mutex_lock(&fs_info->chunk_mutex);
	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
	mutex_unlock(&fs_info->chunk_mutex);
}
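
/*
 * Typical usage (an illustrative sketch, not an actual caller in this file):
 *
 *	btrfs_reserve_chunk_metadata(trans, false);
 *	ret = update_some_chunk_btree_item(trans, ...);
 *	btrfs_trans_release_chunk_metadata(trans);
 *
 * where update_some_chunk_btree_item() stands in for whatever chunk btree
 * modification the caller performs.
 */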

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;

	block_group = btrfs_lookup_first_block_group(info, 0);
	while (block_group) {
		btrfs_wait_block_group_cache_done(block_group);
		spin_lock(&block_group->lock);
		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
				       &block_group->runtime_flags)) {
			struct inode *inode = block_group->inode;

			block_group->inode = NULL;
			spin_unlock(&block_group->lock);

			ASSERT(block_group->io_ctl.inode == NULL);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		block_group = btrfs_next_block_group(block_group);
	}
}

/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	write_lock(&info->block_group_cache_lock);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	write_unlock(&info->block_group_cache_lock);
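
	/*
	 * Drop the references held by the unused and reclaim lists; both
	 * lists are protected by unused_bgs_lock.
	 */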
	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}

	while (!list_empty(&info->reclaim_bgs)) {
		block_group = list_first_entry(&info->reclaim_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->zone_active_bgs_lock);
	while (!list_empty(&info->zone_active_bgs)) {
		block_group = list_first_entry(&info->zone_active_bgs,
					       struct btrfs_block_group,
					       active_bg_list);
		list_del_init(&block_group->active_bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->zone_active_bgs_lock);
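
	/*
	 * Tear down every block group still tracked in the cache tree. The
	 * write lock is dropped while each group is cleaned up, since the
	 * cleanup takes sleeping locks such as the space_info groups_sem.
	 */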
	write_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase_cached(&block_group->cache_node,
				&info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		write_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		btrfs_put_block_group(block_group);

		write_lock(&info->block_group_cache_lock);
	}
	write_unlock(&info->block_group_cache_lock);

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);

		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on bytes_reserved > 0 in
		 * that case.
		 */
		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
			if (WARN_ON(space_info->bytes_reserved > 0))
				btrfs_dump_space_info(info, space_info, 0, 0);
		}

		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
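
/*
 * A freeze/unfreeze pair keeps a block group's extent map and free space
 * cache alive while a task, for example one trimming the block group, still
 * needs them. If the group is removed while frozen, the final cleanup is
 * deferred to btrfs_unfreeze_block_group(), which performs it once the last
 * holder drops the count and BLOCK_GROUP_FLAG_REMOVED is set.
 */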
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}

void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start, 1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* Once for us and once for the tree. */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We may have left one free space entry and other possible
		 * tasks trimming this block group have left 1 entry each one.
		 * Free them if any.
		 */
		btrfs_remove_free_space_cache(block_group);
	}
}
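
/*
 * Account one more extent of an active swap file in this block group. Fails
 * (returns false) if the group is read-only, since a read-only group could be
 * relocated, which must not happen under an active swap file.
 */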
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}