1 // SPDX-License-Identifier: GPL-2.0
7 #include "block-group.h"
8 #include "space-info.h"
9 #include "disk-io.h"
10 #include "free-space-cache.h"
11 #include "free-space-tree.h"
14 #include "ref-verify.h"
16 #include "tree-log.h"
17 #include "delalloc-space.h"
23 #include "extent-tree.h"
28 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_should_fragment_free_space()
31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_should_fragment_free_space()
33 block_group->flags & BTRFS_BLOCK_GROUP_DATA); in btrfs_should_fragment_free_space()
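/*
 * get_restripe_target(): if a balance with a convert filter is in progress
 * for the given block group type (data, system or metadata), return the
 * profile it is converting to; otherwise no target is reported.
 */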
45 struct btrfs_balance_control *bctl = fs_info->balance_ctl; in get_restripe_target()
52 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
53 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; in get_restripe_target()
55 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
56 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; in get_restripe_target()
58 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
59 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; in get_restripe_target()
74 u64 num_devices = fs_info->fs_devices->rw_devices; in btrfs_reduce_alloc_profile()
83 spin_lock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
86 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
89 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
98 /* Select the highest-redundancy RAID level. */ in btrfs_reduce_alloc_profile()
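/*
 * btrfs_get_alloc_profile(): read the available allocation bits for the
 * requested type under a seqlock retry loop, so a concurrent profile update
 * is never observed half-applied, then reduce them to a single usable
 * RAID profile.
 */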
128 seq = read_seqbegin(&fs_info->profiles_lock); in btrfs_get_alloc_profile()
131 flags |= fs_info->avail_data_alloc_bits; in btrfs_get_alloc_profile()
133 flags |= fs_info->avail_system_alloc_bits; in btrfs_get_alloc_profile()
135 flags |= fs_info->avail_metadata_alloc_bits; in btrfs_get_alloc_profile()
136 } while (read_seqretry(&fs_info->profiles_lock, seq)); in btrfs_get_alloc_profile()
141 void btrfs_get_block_group(struct btrfs_block_group *cache) in btrfs_get_block_group() argument
143 refcount_inc(&cache->refs); in btrfs_get_block_group()
146 void btrfs_put_block_group(struct btrfs_block_group *cache) in btrfs_put_block_group() argument
148 if (refcount_dec_and_test(&cache->refs)) { in btrfs_put_block_group()
149 WARN_ON(cache->pinned > 0); in btrfs_put_block_group()
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_put_block_group()
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) in btrfs_put_block_group()
159 WARN_ON(cache->reserved > 0); in btrfs_put_block_group()
166 if (WARN_ON(!list_empty(&cache->discard_list))) in btrfs_put_block_group()
167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, in btrfs_put_block_group()
168 cache); in btrfs_put_block_group()
170 kfree(cache->free_space_ctl); in btrfs_put_block_group()
171 btrfs_free_chunk_map(cache->physical_map); in btrfs_put_block_group()
172 kfree(cache); in btrfs_put_block_group()
177 * This adds the block group to the fs_info rb tree for the block group cache
184 struct btrfs_block_group *cache; in btrfs_add_block_group_cache() local
187 ASSERT(block_group->length != 0); in btrfs_add_block_group_cache()
189 write_lock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
190 p = &info->block_group_cache_tree.rb_root.rb_node; in btrfs_add_block_group_cache()
194 cache = rb_entry(parent, struct btrfs_block_group, cache_node); in btrfs_add_block_group_cache()
195 if (block_group->start < cache->start) { in btrfs_add_block_group_cache()
196 p = &(*p)->rb_left; in btrfs_add_block_group_cache()
197 } else if (block_group->start > cache->start) { in btrfs_add_block_group_cache()
198 p = &(*p)->rb_right; in btrfs_add_block_group_cache()
201 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
202 return -EEXIST; in btrfs_add_block_group_cache()
206 rb_link_node(&block_group->cache_node, parent, p); in btrfs_add_block_group_cache()
207 rb_insert_color_cached(&block_group->cache_node, in btrfs_add_block_group_cache()
208 &info->block_group_cache_tree, leftmost); in btrfs_add_block_group_cache()
210 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
216 * This will return the block group at or after bytenr if contains is 0, else
217 * it will return the block group that contains the bytenr
222 struct btrfs_block_group *cache, *ret = NULL; in block_group_cache_tree_search() local
226 read_lock(&info->block_group_cache_lock); in block_group_cache_tree_search()
227 n = info->block_group_cache_tree.rb_root.rb_node; in block_group_cache_tree_search()
230 cache = rb_entry(n, struct btrfs_block_group, cache_node); in block_group_cache_tree_search()
231 end = cache->start + cache->length - 1; in block_group_cache_tree_search()
232 start = cache->start; in block_group_cache_tree_search()
235 if (!contains && (!ret || start < ret->start)) in block_group_cache_tree_search()
236 ret = cache; in block_group_cache_tree_search()
237 n = n->rb_left; in block_group_cache_tree_search()
240 ret = cache; in block_group_cache_tree_search()
243 n = n->rb_right; in block_group_cache_tree_search()
245 ret = cache; in block_group_cache_tree_search()
251 read_unlock(&info->block_group_cache_lock); in block_group_cache_tree_search()
257 * Return the block group that starts at or after bytenr
266 * Return the block group that contains the given bytenr
275 struct btrfs_block_group *cache) in btrfs_next_block_group() argument
277 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_next_block_group()
280 read_lock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
282 /* If our block group was removed, we need a full search. */ in btrfs_next_block_group()
283 if (RB_EMPTY_NODE(&cache->cache_node)) { in btrfs_next_block_group()
284 const u64 next_bytenr = cache->start + cache->length; in btrfs_next_block_group()
286 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
287 btrfs_put_block_group(cache); in btrfs_next_block_group()
290 node = rb_next(&cache->cache_node); in btrfs_next_block_group()
291 btrfs_put_block_group(cache); in btrfs_next_block_group()
293 cache = rb_entry(node, struct btrfs_block_group, cache_node); in btrfs_next_block_group()
294 btrfs_get_block_group(cache); in btrfs_next_block_group()
296 cache = NULL; in btrfs_next_block_group()
297 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
298 return cache; in btrfs_next_block_group()
308 * number of NOCOW writers in the block group that contains the extent, as long
309 * as the block group exists and it's currently not in read-only mode.
311 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
326 spin_lock(&bg->lock); in btrfs_inc_nocow_writers()
327 if (bg->ro) in btrfs_inc_nocow_writers()
330 atomic_inc(&bg->nocow_writers); in btrfs_inc_nocow_writers()
331 spin_unlock(&bg->lock); in btrfs_inc_nocow_writers()
338 /* No put on block group, done by btrfs_dec_nocow_writers(). */ in btrfs_inc_nocow_writers()
343 * Decrement the number of NOCOW writers in a block group.
346 * and on the block group returned by that call. Typically this is called after
350 After this call, the caller should not use the block group anymore. If it wants
355 if (atomic_dec_and_test(&bg->nocow_writers)) in btrfs_dec_nocow_writers()
356 wake_up_var(&bg->nocow_writers); in btrfs_dec_nocow_writers()
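/*
 * Typical pairing (sketch, not taken verbatim from this file): a NOCOW
 * writer checks in before issuing the write and checks out when done:
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (bg) {
 *		... do the NOCOW write against the extent at bytenr ...
 *		btrfs_dec_nocow_writers(bg);
 *	}
 */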
364 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); in btrfs_wait_nocow_writers()
374 if (atomic_dec_and_test(&bg->reservations)) in btrfs_dec_block_group_reservations()
375 wake_up_var(&bg->reservations); in btrfs_dec_block_group_reservations()
381 struct btrfs_space_info *space_info = bg->space_info; in btrfs_wait_block_group_reservations()
383 ASSERT(bg->ro); in btrfs_wait_block_group_reservations()
385 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) in btrfs_wait_block_group_reservations()
389 * Our block group is read only but before we set it to read only, in btrfs_wait_block_group_reservations()
394 * block group's reservations counter is incremented while a read lock in btrfs_wait_block_group_reservations()
398 down_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
399 up_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
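/*
 * Cycling groups_sem through a write lock above acts as a barrier: any task
 * that was incrementing the reservations counter under the read lock has
 * finished by the time the write lock is released, so the wait below sees
 * the final value.
 */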
401 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); in btrfs_wait_block_group_reservations()
405 struct btrfs_block_group *cache) in btrfs_get_caching_control() argument
409 spin_lock(&cache->lock); in btrfs_get_caching_control()
410 if (!cache->caching_ctl) { in btrfs_get_caching_control()
411 spin_unlock(&cache->lock); in btrfs_get_caching_control()
415 ctl = cache->caching_ctl; in btrfs_get_caching_control()
416 refcount_inc(&ctl->count); in btrfs_get_caching_control()
417 spin_unlock(&cache->lock); in btrfs_get_caching_control()
423 if (refcount_dec_and_test(&ctl->count)) in btrfs_put_caching_control()
428 * When we wait for progress in the block group caching, it's because our
433 * up, and then it will check the block group free space numbers for our min
435 * a free extent of a given size, but this is a good start.
437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
438 * any of the information in this block group.
440 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, in btrfs_wait_block_group_cache_progress() argument
446 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_progress()
451 * We've already failed to allocate from this block group, so even if in btrfs_wait_block_group_cache_progress()
452 * there's enough space in the block group, it isn't contiguous enough to in btrfs_wait_block_group_cache_progress()
456 progress = atomic_read(&caching_ctl->progress); in btrfs_wait_block_group_cache_progress()
458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || in btrfs_wait_block_group_cache_progress()
459 (progress != atomic_read(&caching_ctl->progress) && in btrfs_wait_block_group_cache_progress()
460 (cache->free_space_ctl->free_space >= num_bytes))); in btrfs_wait_block_group_cache_progress()
465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, in btrfs_caching_ctl_wait_done() argument
468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); in btrfs_caching_ctl_wait_done()
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; in btrfs_caching_ctl_wait_done()
472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) in btrfs_wait_block_group_cache_done() argument
477 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_done()
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; in btrfs_wait_block_group_cache_done()
480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); in btrfs_wait_block_group_cache_done()
488 struct btrfs_fs_info *fs_info = block_group->fs_info; in fragment_free_space()
489 u64 start = block_group->start; in fragment_free_space()
490 u64 len = block_group->length; in fragment_free_space()
491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? in fragment_free_space()
492 fs_info->nodesize : fs_info->sectorsize; in fragment_free_space()
501 len -= step; in fragment_free_space()
507 * Add a free space range to the in memory free space cache of a block group.
508 * This checks whether the range contains super block locations; any such
509 * locations are not added to the free space cache.
511 * @block_group: The target block group.
515 * added to the block group's free space cache.
522 struct btrfs_fs_info *info = block_group->fs_info; in btrfs_add_new_free_space()
523 u64 extent_start, extent_end, size; in btrfs_add_new_free_space() local
530 if (!find_first_extent_bit(&info->excluded_extents, start, in btrfs_add_new_free_space()
539 size = extent_start - start; in btrfs_add_new_free_space()
541 start, size); in btrfs_add_new_free_space()
545 *total_added_ret += size; in btrfs_add_new_free_space()
553 size = end - start; in btrfs_add_new_free_space()
555 size); in btrfs_add_new_free_space()
559 *total_added_ret += size; in btrfs_add_new_free_space()
566 * Get an arbitrary extent item index / max_index through the block group
568 * @block_group: the block group to sample from
569 * @index: the integral step through the block group to grab from
573 * Pre-conditions on indices:
585 struct btrfs_fs_info *fs_info = block_group->fs_info; in sample_block_group_extent_item()
588 u64 search_end = block_group->start + block_group->length; in sample_block_group_extent_item()
596 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
597 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
601 return -ENOMEM; in sample_block_group_extent_item()
603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start, in sample_block_group_extent_item()
606 path->skip_locking = 1; in sample_block_group_extent_item()
607 path->search_commit_root = 1; in sample_block_group_extent_item()
608 path->reada = READA_FORWARD; in sample_block_group_extent_item()
610 search_offset = index * div_u64(block_group->length, max_index); in sample_block_group_extent_item()
611 search_key.objectid = block_group->start + search_offset; in sample_block_group_extent_item()
616 /* Success; sampled an extent item in the block group */ in sample_block_group_extent_item()
617 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && in sample_block_group_extent_item()
618 found_key->objectid >= block_group->start && in sample_block_group_extent_item()
619 found_key->objectid + found_key->offset <= search_end) in sample_block_group_extent_item()
623 if (found_key->objectid >= search_end) { in sample_block_group_extent_item()
629 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
630 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
636 * Best effort attempt to compute a block group's size class while caching it.
638 * @block_group: the block group we are caching
640 * We cannot infer the size class while adding free space extents, because that
645 * them at even steps through the block group and pick the smallest size class
646 * we see. Since size class is best effort, and not guaranteed in general,
651 * If we are caching in a block group from disk, then there are three major cases
653 * 1. the block group is well behaved and all extents in it are the same size
655 * 2. the block group is mostly one size class with rare exceptions for last
657 * 3. the block group was populated before size classes and can have a totally
658 * arbitrary mix of size classes.
660 * In case 1, looking at any extent in the block group will yield the correct
661 * result. For the mixed cases, taking the minimum size class seems like a good
662 * approximation, since gaps from frees will be usable to the size class. For
672 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_block_group_size_class()
674 int i; in load_block_group_size_class() local
675 u64 min_size = block_group->length; in load_block_group_size_class()
682 lockdep_assert_held(&caching_ctl->mutex); in load_block_group_size_class()
683 lockdep_assert_held_read(&fs_info->commit_root_sem); in load_block_group_size_class()
684 for (i = 0; i < 5; ++i) { in load_block_group_size_class()
685 ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key); in load_block_group_size_class()
694 spin_lock(&block_group->lock); in load_block_group_size_class()
695 block_group->size_class = size_class; in load_block_group_size_class()
696 spin_unlock(&block_group->lock); in load_block_group_size_class()
704 struct btrfs_block_group *block_group = caching_ctl->block_group; in load_extent_tree_free()
705 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_extent_tree_free()
718 return -ENOMEM; in load_extent_tree_free()
720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); in load_extent_tree_free()
726 * allocate from this block group until we've had a chance to fragment in load_extent_tree_free()
736 * root, since it's read-only in load_extent_tree_free()
738 path->skip_locking = 1; in load_extent_tree_free()
739 path->search_commit_root = 1; in load_extent_tree_free()
740 path->reada = READA_FORWARD; in load_extent_tree_free()
751 leaf = path->nodes[0]; in load_extent_tree_free()
756 last = (u64)-1; in load_extent_tree_free()
760 if (path->slots[0] < nritems) { in load_extent_tree_free()
761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in load_extent_tree_free()
768 rwsem_is_contended(&fs_info->commit_root_sem)) { in load_extent_tree_free()
770 up_read(&fs_info->commit_root_sem); in load_extent_tree_free()
771 mutex_unlock(&caching_ctl->mutex); in load_extent_tree_free()
773 mutex_lock(&caching_ctl->mutex); in load_extent_tree_free()
774 down_read(&fs_info->commit_root_sem); in load_extent_tree_free()
783 leaf = path->nodes[0]; in load_extent_tree_free()
796 if (key.objectid < block_group->start) { in load_extent_tree_free()
797 path->slots[0]++; in load_extent_tree_free()
801 if (key.objectid >= block_group->start + block_group->length) in load_extent_tree_free()
815 fs_info->nodesize; in load_extent_tree_free()
822 atomic_inc(&caching_ctl->progress); in load_extent_tree_free()
823 wake_up(&caching_ctl->wait); in load_extent_tree_free()
827 path->slots[0]++; in load_extent_tree_free()
831 block_group->start + block_group->length, in load_extent_tree_free()
840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, in btrfs_free_excluded_extents()
841 bg->start + bg->length - 1, EXTENT_UPTODATE); in btrfs_free_excluded_extents()
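/*
 * btrfs_free_excluded_extents() clears the EXTENT_UPTODATE markers that
 * exclude_super_stripes() sets over super block copies, once the block
 * group's free space has been populated or is no longer needed.
 */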
852 block_group = caching_ctl->block_group; in caching_thread()
853 fs_info = block_group->fs_info; in caching_thread()
855 mutex_lock(&caching_ctl->mutex); in caching_thread()
856 down_read(&fs_info->commit_root_sem); in caching_thread()
867 * We failed to load the space cache, so set ourselves to in caching_thread()
870 spin_lock(&block_group->lock); in caching_thread()
871 block_group->cached = BTRFS_CACHE_STARTED; in caching_thread()
872 spin_unlock(&block_group->lock); in caching_thread()
873 wake_up(&caching_ctl->wait); in caching_thread()
878 * can't actually cache from the free space tree as our commit root and in caching_thread()
884 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) in caching_thread()
889 spin_lock(&block_group->lock); in caching_thread()
890 block_group->caching_ctl = NULL; in caching_thread()
891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; in caching_thread()
892 spin_unlock(&block_group->lock); in caching_thread()
898 spin_lock(&block_group->space_info->lock); in caching_thread()
899 spin_lock(&block_group->lock); in caching_thread()
900 bytes_used = block_group->length - block_group->used; in caching_thread()
901 block_group->space_info->bytes_used += bytes_used >> 1; in caching_thread()
902 spin_unlock(&block_group->lock); in caching_thread()
903 spin_unlock(&block_group->space_info->lock); in caching_thread()
908 up_read(&fs_info->commit_root_sem); in caching_thread()
910 mutex_unlock(&caching_ctl->mutex); in caching_thread()
912 wake_up(&caching_ctl->wait); in caching_thread()
918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) in btrfs_cache_block_group() argument
920 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_cache_block_group()
924 /* Allocator for zoned filesystems does not use the cache at all */ in btrfs_cache_block_group()
930 return -ENOMEM; in btrfs_cache_block_group()
932 INIT_LIST_HEAD(&caching_ctl->list); in btrfs_cache_block_group()
933 mutex_init(&caching_ctl->mutex); in btrfs_cache_block_group()
934 init_waitqueue_head(&caching_ctl->wait); in btrfs_cache_block_group()
935 caching_ctl->block_group = cache; in btrfs_cache_block_group()
936 refcount_set(&caching_ctl->count, 2); in btrfs_cache_block_group()
937 atomic_set(&caching_ctl->progress, 0); in btrfs_cache_block_group()
938 btrfs_init_work(&caching_ctl->work, caching_thread, NULL); in btrfs_cache_block_group()
940 spin_lock(&cache->lock); in btrfs_cache_block_group()
941 if (cache->cached != BTRFS_CACHE_NO) { in btrfs_cache_block_group()
944 caching_ctl = cache->caching_ctl; in btrfs_cache_block_group()
946 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
947 spin_unlock(&cache->lock); in btrfs_cache_block_group()
950 WARN_ON(cache->caching_ctl); in btrfs_cache_block_group()
951 cache->caching_ctl = caching_ctl; in btrfs_cache_block_group()
952 cache->cached = BTRFS_CACHE_STARTED; in btrfs_cache_block_group()
953 spin_unlock(&cache->lock); in btrfs_cache_block_group()
955 write_lock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
956 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
957 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); in btrfs_cache_block_group()
958 write_unlock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
960 btrfs_get_block_group(cache); in btrfs_cache_block_group()
962 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); in btrfs_cache_block_group()
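/* If the caller asked to wait, block until caching finishes or errors out. */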
965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); in btrfs_cache_block_group()
977 write_seqlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
979 fs_info->avail_data_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
981 fs_info->avail_metadata_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
983 fs_info->avail_system_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
984 write_sequnlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
990 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
993 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
1003 struct list_head *head = &fs_info->space_info; in clear_incompat_bg_bits()
1007 down_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
1008 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) in clear_incompat_bg_bits()
1010 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) in clear_incompat_bg_bits()
1012 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) in clear_incompat_bg_bits()
1014 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) in clear_incompat_bg_bits()
1016 up_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
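/*
 * If no block group with a given RAID profile remains, the matching
 * incompat bit can safely be dropped from the superblock flags.
 */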
1029 struct btrfs_fs_info *fs_info = trans->fs_info; in remove_block_group_item()
1035 key.objectid = block_group->start; in remove_block_group_item()
1037 key.offset = block_group->length; in remove_block_group_item()
1039 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in remove_block_group_item()
1041 ret = -ENOENT; in remove_block_group_item()
1052 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_remove_block_group()
1065 block_group = btrfs_lookup_block_group(fs_info, map->start); in btrfs_remove_block_group()
1067 BUG_ON(!block_group->ro); in btrfs_remove_block_group()
1071 * Free the reserved super bytes from this block group before in btrfs_remove_block_group()
1075 btrfs_free_ref_tree_range(fs_info, block_group->start, in btrfs_remove_block_group()
1076 block_group->length); in btrfs_remove_block_group()
1078 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_remove_block_group()
1079 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_remove_block_group()
1081 /* make sure this block group isn't part of an allocation cluster */ in btrfs_remove_block_group()
1082 cluster = &fs_info->data_alloc_cluster; in btrfs_remove_block_group()
1083 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1085 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1088 * make sure this block group isn't part of a metadata in btrfs_remove_block_group()
1091 cluster = &fs_info->meta_alloc_cluster; in btrfs_remove_block_group()
1092 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1094 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1101 ret = -ENOMEM; in btrfs_remove_block_group()
1111 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1113 * Make sure our free space cache IO is done before removing the in btrfs_remove_block_group()
1116 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1117 if (!list_empty(&block_group->io_list)) { in btrfs_remove_block_group()
1118 list_del_init(&block_group->io_list); in btrfs_remove_block_group()
1120 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); in btrfs_remove_block_group()
1122 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1125 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1128 if (!list_empty(&block_group->dirty_list)) { in btrfs_remove_block_group()
1129 list_del_init(&block_group->dirty_list); in btrfs_remove_block_group()
1133 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1134 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1140 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1141 rb_erase_cached(&block_group->cache_node, in btrfs_remove_block_group()
1142 &fs_info->block_group_cache_tree); in btrfs_remove_block_group()
1143 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_remove_block_group()
1145 /* Once for the block groups rbtree */ in btrfs_remove_block_group()
1148 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1150 down_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1155 list_del_init(&block_group->list); in btrfs_remove_block_group()
1156 if (list_empty(&block_group->space_info->block_groups[index])) { in btrfs_remove_block_group()
1157 kobj = block_group->space_info->block_group_kobjs[index]; in btrfs_remove_block_group()
1158 block_group->space_info->block_group_kobjs[index] = NULL; in btrfs_remove_block_group()
1159 clear_avail_alloc_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1161 up_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1162 clear_incompat_bg_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1168 if (block_group->cached == BTRFS_CACHE_STARTED) in btrfs_remove_block_group()
1171 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1176 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { in btrfs_remove_block_group()
1177 if (ctl->block_group == block_group) { in btrfs_remove_block_group()
1179 refcount_inc(&caching_ctl->count); in btrfs_remove_block_group()
1185 list_del_init(&caching_ctl->list); in btrfs_remove_block_group()
1186 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1194 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1195 WARN_ON(!list_empty(&block_group->dirty_list)); in btrfs_remove_block_group()
1196 WARN_ON(!list_empty(&block_group->io_list)); in btrfs_remove_block_group()
1197 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1201 spin_lock(&block_group->space_info->lock); in btrfs_remove_block_group()
1202 list_del_init(&block_group->ro_list); in btrfs_remove_block_group()
1205 WARN_ON(block_group->space_info->total_bytes in btrfs_remove_block_group()
1206 < block_group->length); in btrfs_remove_block_group()
1207 WARN_ON(block_group->space_info->bytes_readonly in btrfs_remove_block_group()
1208 < block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1209 WARN_ON(block_group->space_info->bytes_zone_unusable in btrfs_remove_block_group()
1210 < block_group->zone_unusable); in btrfs_remove_block_group()
1211 WARN_ON(block_group->space_info->disk_total in btrfs_remove_block_group()
1212 < block_group->length * factor); in btrfs_remove_block_group()
1214 block_group->space_info->total_bytes -= block_group->length; in btrfs_remove_block_group()
1215 block_group->space_info->bytes_readonly -= in btrfs_remove_block_group()
1216 (block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1217 block_group->space_info->bytes_zone_unusable -= in btrfs_remove_block_group()
1218 block_group->zone_unusable; in btrfs_remove_block_group()
1219 block_group->space_info->disk_total -= block_group->length * factor; in btrfs_remove_block_group()
1221 spin_unlock(&block_group->space_info->lock); in btrfs_remove_block_group()
1224 * Remove the free space for the block group from the free space tree in btrfs_remove_block_group()
1225 * and the block group's item from the extent tree before marking the in btrfs_remove_block_group()
1226 * block group as removed. This is to prevent races with tasks that in btrfs_remove_block_group()
1227 * freeze and unfreeze a block group, this task and another task in btrfs_remove_block_group()
1228 * allocating a new block group - the unfreeze task ends up removing in btrfs_remove_block_group()
1229 * the block group's extent map before the task calling this function in btrfs_remove_block_group()
1230 * deletes the block group item from the extent tree, allowing for in btrfs_remove_block_group()
1231 * another task to attempt to create another block group with the same in btrfs_remove_block_group()
1232 * item key (and failing with -EEXIST and a transaction abort). in btrfs_remove_block_group()
1242 spin_lock(&block_group->lock); in btrfs_remove_block_group()
1243 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); in btrfs_remove_block_group()
1246 * At this point trimming or scrub can't start on this block group, in btrfs_remove_block_group()
1247 * because we removed the block group from the rbtree in btrfs_remove_block_group()
1248 * fs_info->block_group_cache_tree so no one can find it anymore and in btrfs_remove_block_group()
1249 * even if someone already got this block group before we removed it in btrfs_remove_block_group()
1250 * from the rbtree, they have already incremented block_group->frozen - in btrfs_remove_block_group()
1255 * And we must not remove the chunk map from the fs_info->mapping_tree in btrfs_remove_block_group()
1257 * ranges from being reused for a new block group. This is needed to in btrfs_remove_block_group()
1263 * allowing for new block groups to be created that can reuse the same in btrfs_remove_block_group()
1267 * is mounted with -odiscard. The same protections must remain in btrfs_remove_block_group()
1271 remove_map = (atomic_read(&block_group->frozen) == 0); in btrfs_remove_block_group()
1272 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1295 ASSERT(map->start == chunk_offset); in btrfs_start_trans_remove_block_group()
1299 * to remove a block group (done at btrfs_remove_chunk() and at in btrfs_start_trans_remove_block_group()
1304 * 1 unit for deleting the block group item (located in the extent in btrfs_start_trans_remove_block_group()
1311 * In order to remove a block group we also need to reserve units in the in btrfs_start_trans_remove_block_group()
1316 num_items = 3 + map->num_stripes; in btrfs_start_trans_remove_block_group()
1323 * Mark block group @cache read-only, so later write won't happen to block
1324 * group @cache.
1326 * If @force is not set, this function will only mark the block group readonly
1327 * if we have enough free space (1M) in other metadata/system block groups.
1328 * If @force is set, this function will mark the block group readonly in inc_block_group_ro()
1331 * NOTE: This function doesn't care if other block groups can contain all the
1332 * data in this block group. That check should be done by relocation routine,
1335 static int inc_block_group_ro(struct btrfs_block_group *cache, int force) in inc_block_group_ro() argument
1337 struct btrfs_space_info *sinfo = cache->space_info; in inc_block_group_ro()
1339 int ret = -ENOSPC; in inc_block_group_ro()
1341 spin_lock(&sinfo->lock); in inc_block_group_ro()
1342 spin_lock(&cache->lock); in inc_block_group_ro()
1344 if (cache->swap_extents) { in inc_block_group_ro()
1345 ret = -ETXTBSY; in inc_block_group_ro()
1349 if (cache->ro) { in inc_block_group_ro()
1350 cache->ro++; in inc_block_group_ro()
1355 num_bytes = cache->length - cache->reserved - cache->pinned - in inc_block_group_ro()
1356 cache->bytes_super - cache->zone_unusable - cache->used; in inc_block_group_ro()
1364 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { in inc_block_group_ro()
1371 if (sinfo_used + num_bytes <= sinfo->total_bytes) in inc_block_group_ro()
1378 * leeway to allow us to mark this block group as read only. in inc_block_group_ro()
1380 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, in inc_block_group_ro()
1386 sinfo->bytes_readonly += num_bytes; in inc_block_group_ro()
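/*
 * On zoned filesystems, migrate the block group's zone_unusable bytes into
 * the read-only accounting as well.
 */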
1387 if (btrfs_is_zoned(cache->fs_info)) { in inc_block_group_ro()
1389 sinfo->bytes_readonly += cache->zone_unusable; in inc_block_group_ro()
1390 sinfo->bytes_zone_unusable -= cache->zone_unusable; in inc_block_group_ro()
1391 cache->zone_unusable = 0; in inc_block_group_ro()
1393 cache->ro++; in inc_block_group_ro()
1394 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); in inc_block_group_ro()
1397 spin_unlock(&cache->lock); in inc_block_group_ro()
1398 spin_unlock(&sinfo->lock); in inc_block_group_ro()
1399 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { in inc_block_group_ro()
1400 btrfs_info(cache->fs_info, in inc_block_group_ro()
1401 "unable to make block group %llu ro", cache->start); in inc_block_group_ro()
1402 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); in inc_block_group_ro()
1410 struct btrfs_fs_info *fs_info = bg->fs_info; in clean_pinned_extents()
1412 const u64 start = bg->start; in clean_pinned_extents()
1413 const u64 end = start + bg->length - 1; in clean_pinned_extents()
1416 spin_lock(&fs_info->trans_lock); in clean_pinned_extents()
1417 if (trans->transaction->list.prev != &fs_info->trans_list) { in clean_pinned_extents()
1418 prev_trans = list_last_entry(&trans->transaction->list, in clean_pinned_extents()
1420 refcount_inc(&prev_trans->use_count); in clean_pinned_extents()
1422 spin_unlock(&fs_info->trans_lock); in clean_pinned_extents()
1428 * transaction N - 1, and have seen a range belonging to the block in clean_pinned_extents()
1429 * group in pinned_extents before we were able to clear the whole block in clean_pinned_extents()
1431 * the block group after we unpinned it from pinned_extents and removed in clean_pinned_extents()
1434 mutex_lock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1436 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, in clean_pinned_extents()
1442 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, in clean_pinned_extents()
1445 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1465 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_delete_unused_bgs()
1475 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_delete_unused_bgs()
1478 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1479 while (!list_empty(&fs_info->unused_bgs)) { in btrfs_delete_unused_bgs()
1483 block_group = list_first_entry(&fs_info->unused_bgs, in btrfs_delete_unused_bgs()
1486 list_del_init(&block_group->bg_list); in btrfs_delete_unused_bgs()
1488 space_info = block_group->space_info; in btrfs_delete_unused_bgs()
1494 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1496 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); in btrfs_delete_unused_bgs()
1499 down_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1502 * Async discard moves the final block group discard to be prior in btrfs_delete_unused_bgs()
1509 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1511 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1516 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1517 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1518 if (btrfs_is_block_group_used(block_group) || block_group->ro || in btrfs_delete_unused_bgs()
1519 list_is_singular(&block_group->list)) { in btrfs_delete_unused_bgs()
1522 * outstanding allocations in this block group. We do in btrfs_delete_unused_bgs()
1524 * this block group. in btrfs_delete_unused_bgs()
1527 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1528 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1529 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1534 * The block group may be unused but there may be space reserved in btrfs_delete_unused_bgs()
1535 * accounting with the existence of that block group, that is, in btrfs_delete_unused_bgs()
1536 * space_info->bytes_may_use was incremented by a task but no in btrfs_delete_unused_bgs()
1537 * space was yet allocated from the block group by the task. in btrfs_delete_unused_bgs()
1544 * So check if the total space of the space_info minus the size in btrfs_delete_unused_bgs()
1545 * of this block group is less than the used space of the in btrfs_delete_unused_bgs()
1546 * space_info - if that's the case, then it means we have tasks in btrfs_delete_unused_bgs()
1547 * that might be relying on the block group in order to allocate in btrfs_delete_unused_bgs()
1548 * extents, and add back the block group to the unused list when in btrfs_delete_unused_bgs()
1550 * needing to allocate extents from the block group. in btrfs_delete_unused_bgs()
1553 if (space_info->total_bytes - block_group->length < used) { in btrfs_delete_unused_bgs()
1557 * fs_info->unused_bgs list. in btrfs_delete_unused_bgs()
1560 list_add_tail(&block_group->bg_list, &retry_list); in btrfs_delete_unused_bgs()
1563 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1564 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1565 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1569 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1570 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1574 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1583 if (ret == -EAGAIN) in btrfs_delete_unused_bgs()
1593 block_group->start); in btrfs_delete_unused_bgs()
1601 * We could have pending pinned extents for this block group, in btrfs_delete_unused_bgs()
1616 spin_lock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1617 if (!list_empty(&block_group->discard_list)) { in btrfs_delete_unused_bgs()
1618 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1620 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1624 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1627 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1628 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1631 -block_group->pinned); in btrfs_delete_unused_bgs()
1632 space_info->bytes_readonly += block_group->pinned; in btrfs_delete_unused_bgs()
1633 block_group->pinned = 0; in btrfs_delete_unused_bgs()
1635 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1636 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1639 * The normal path is that an unused block group is passed here, in btrfs_delete_unused_bgs()
1642 * before coming down the unused block group path as trimming in btrfs_delete_unused_bgs()
1650 * need to reset sequential-required zones. in btrfs_delete_unused_bgs()
1663 ret = btrfs_remove_chunk(trans, block_group->start); in btrfs_delete_unused_bgs()
1672 * If we're not mounted with -odiscard, we can just forget in btrfs_delete_unused_bgs()
1673 * about this block group. Otherwise we'll need to wait in btrfs_delete_unused_bgs()
1677 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1680 * fs_info->unused_bgs, so use a list_move operation in btrfs_delete_unused_bgs()
1681 * to add the block group to the deleted_bgs list. in btrfs_delete_unused_bgs()
1683 list_move(&block_group->bg_list, in btrfs_delete_unused_bgs()
1684 &trans->transaction->deleted_bgs); in btrfs_delete_unused_bgs()
1685 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1692 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1694 list_splice_tail(&retry_list, &fs_info->unused_bgs); in btrfs_delete_unused_bgs()
1695 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1696 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1701 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1702 list_splice_tail(&retry_list, &fs_info->unused_bgs); in btrfs_delete_unused_bgs()
1703 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1704 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1711 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_unused()
1713 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1714 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_unused()
1717 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1718 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { in btrfs_mark_bg_unused()
1719 /* Pull out the block group from the reclaim_bgs list. */ in btrfs_mark_bg_unused()
1721 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1723 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1727 * We want block groups with a low number of used bytes to be in the beginning
1738 return bg1->used > bg2->used; in reclaim_bgs_cmp()
1750 const struct btrfs_space_info *space_info = bg->space_info; in should_reclaim_block_group()
1751 const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); in should_reclaim_block_group()
1752 const u64 new_val = bg->used; in should_reclaim_block_group()
1759 thresh = mult_perc(bg->length, reclaim_thresh); in should_reclaim_block_group()
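/*
 * Reclaim triggers once the used bytes drop below this percentage of the
 * block group's length.
 */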
1763 * brand new block group and we don't want to relocate new block groups. in should_reclaim_block_group()
1779 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_reclaim_bgs_work()
1788 sb_start_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1791 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1799 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { in btrfs_reclaim_bgs_work()
1801 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1805 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1808 * The block groups might still be in use and reachable via bg_list, in btrfs_reclaim_bgs_work()
1811 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); in btrfs_reclaim_bgs_work()
1812 while (!list_empty(&fs_info->reclaim_bgs)) { in btrfs_reclaim_bgs_work()
1816 bg = list_first_entry(&fs_info->reclaim_bgs, in btrfs_reclaim_bgs_work()
1819 list_del_init(&bg->bg_list); in btrfs_reclaim_bgs_work()
1821 space_info = bg->space_info; in btrfs_reclaim_bgs_work()
1822 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1825 down_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1827 spin_lock(&bg->lock); in btrfs_reclaim_bgs_work()
1828 if (bg->reserved || bg->pinned || bg->ro) { in btrfs_reclaim_bgs_work()
1831 * outstanding allocations in this block group. We do in btrfs_reclaim_bgs_work()
1833 * this block group. in btrfs_reclaim_bgs_work()
1835 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1836 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1839 if (bg->used == 0) { in btrfs_reclaim_bgs_work()
1841 * It is possible that we trigger relocation on a block in btrfs_reclaim_bgs_work()
1847 * for the non-existent extents and running some extra in btrfs_reclaim_bgs_work()
1849 * other mechanisms for dealing with empty block groups. in btrfs_reclaim_bgs_work()
1853 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1854 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1859 * The block group might no longer meet the reclaim condition by in btrfs_reclaim_bgs_work()
1868 if (!should_reclaim_block_group(bg, bg->length)) { in btrfs_reclaim_bgs_work()
1869 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1870 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1873 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1876 * Get out fast, in case we're read-only or unmounting the in btrfs_reclaim_bgs_work()
1877 * filesystem. It is OK to drop block groups from the list even in btrfs_reclaim_bgs_work()
1878 * for the read-only case. As we did sb_start_write(), in btrfs_reclaim_bgs_work()
1879 * "mount -o remount,ro" won't happen and read-only filesystem in btrfs_reclaim_bgs_work()
1880 * means it is forced read-only due to a fatal error. So, it in btrfs_reclaim_bgs_work()
1881 * never gets back to read-write to let us reclaim again. in btrfs_reclaim_bgs_work()
1884 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1889 * Cache the zone_unusable value before turning the block group in btrfs_reclaim_bgs_work()
1891 * zone_unusable value gets moved to the block group's read-only in btrfs_reclaim_bgs_work()
1894 zone_unusable = bg->zone_unusable; in btrfs_reclaim_bgs_work()
1896 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1902 bg->start, in btrfs_reclaim_bgs_work()
1903 div64_u64(bg->used * 100, bg->length), in btrfs_reclaim_bgs_work()
1904 div64_u64(zone_unusable * 100, bg->length)); in btrfs_reclaim_bgs_work()
1906 ret = btrfs_relocate_chunk(fs_info, bg->start); in btrfs_reclaim_bgs_work()
1910 bg->start); in btrfs_reclaim_bgs_work()
1918 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1920 * Reclaiming all the block groups in the list can take really in btrfs_reclaim_bgs_work()
1921 * long. Prioritize cleaning up unused block groups. in btrfs_reclaim_bgs_work()
1928 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_reclaim_bgs_work()
1930 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1932 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1933 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1936 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1941 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1942 if (!list_empty(&fs_info->reclaim_bgs)) in btrfs_reclaim_bgs()
1943 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); in btrfs_reclaim_bgs()
1944 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1949 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_to_reclaim()
1951 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1952 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_to_reclaim()
1955 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); in btrfs_mark_bg_to_reclaim()
1957 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1970 slot = path->slots[0]; in read_bg_from_eb()
1971 leaf = path->nodes[0]; in read_bg_from_eb()
1973 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); in read_bg_from_eb()
1977 key->objectid, key->offset); in read_bg_from_eb()
1978 return -ENOENT; in read_bg_from_eb()
1981 if (map->start != key->objectid || map->chunk_len != key->offset) { in read_bg_from_eb()
1983 "block group %llu len %llu mismatch with chunk %llu len %llu", in read_bg_from_eb()
1984 key->objectid, key->offset, map->start, map->chunk_len); in read_bg_from_eb()
1985 ret = -EUCLEAN; in read_bg_from_eb()
1994 if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in read_bg_from_eb()
1996 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", in read_bg_from_eb()
1997 key->objectid, key->offset, flags, in read_bg_from_eb()
1998 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); in read_bg_from_eb()
1999 ret = -EUCLEAN; in read_bg_from_eb()
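/*
 * Any mismatch between the block group item and its chunk map indicates
 * corrupted metadata, so bail out with -EUCLEAN instead of continuing.
 */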
2016 if (found_key.objectid >= key->objectid && in find_first_block_group()
2029 write_seqlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2031 fs_info->avail_data_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2033 fs_info->avail_metadata_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2035 fs_info->avail_system_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2036 write_sequnlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2043 * @chunk_start: logical address of block group
2047 * @stripe_len: size of IO stripe for the given block group
2050 * Used primarily to exclude those portions of a block group that contain super
2051 * block copies.
2061 int i, nr = 0; in btrfs_rmap_block() local
2066 return -EIO; in btrfs_rmap_block()
2068 data_stripe_length = map->stripe_size; in btrfs_rmap_block()
2070 chunk_start = map->start; in btrfs_rmap_block()
2073 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in btrfs_rmap_block()
2076 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); in btrfs_rmap_block()
2078 ret = -ENOMEM; in btrfs_rmap_block()
2082 for (i = 0; i < map->num_stripes; i++) { in btrfs_rmap_block()
2088 if (!in_range(physical, map->stripes[i].physical, in btrfs_rmap_block()
2092 stripe_nr = (physical - map->stripes[i].physical) >> in btrfs_rmap_block()
2094 offset = (physical - map->stripes[i].physical) & in btrfs_rmap_block()
2097 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | in btrfs_rmap_block()
2099 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, in btrfs_rmap_block()
2100 map->sub_stripes); in btrfs_rmap_block()
2104 * instead of map->stripe_len in btrfs_rmap_block()
2128 static int exclude_super_stripes(struct btrfs_block_group *cache) in exclude_super_stripes() argument
2130 struct btrfs_fs_info *fs_info = cache->fs_info; in exclude_super_stripes()
2135 int i, nr, ret; in exclude_super_stripes() local
2137 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { in exclude_super_stripes()
2138 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; in exclude_super_stripes()
2139 cache->bytes_super += stripe_len; in exclude_super_stripes()
2140 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, in exclude_super_stripes()
2141 cache->start + stripe_len - 1, in exclude_super_stripes()
2147 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { in exclude_super_stripes()
2148 bytenr = btrfs_sb_offset(i); in exclude_super_stripes()
2149 ret = btrfs_rmap_block(fs_info, cache->start, in exclude_super_stripes()
2158 "zoned: block group %llu must not contain super block", in exclude_super_stripes()
2159 cache->start); in exclude_super_stripes()
2160 return -EUCLEAN; in exclude_super_stripes()
2163 while (nr--) { in exclude_super_stripes()
2165 cache->start + cache->length - logical[nr]); in exclude_super_stripes()
2167 cache->bytes_super += len; in exclude_super_stripes()
2168 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], in exclude_super_stripes()
2169 logical[nr] + len - 1, in exclude_super_stripes()
2185 struct btrfs_block_group *cache; in btrfs_create_block_group_cache() local
2187 cache = kzalloc(sizeof(*cache), GFP_NOFS); in btrfs_create_block_group_cache()
2188 if (!cache) in btrfs_create_block_group_cache()
2191 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), in btrfs_create_block_group_cache()
2193 if (!cache->free_space_ctl) { in btrfs_create_block_group_cache()
2194 kfree(cache); in btrfs_create_block_group_cache()
2198 cache->start = start; in btrfs_create_block_group_cache()
2200 cache->fs_info = fs_info; in btrfs_create_block_group_cache()
2201 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); in btrfs_create_block_group_cache()
2203 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; in btrfs_create_block_group_cache()
2205 refcount_set(&cache->refs, 1); in btrfs_create_block_group_cache()
2206 spin_lock_init(&cache->lock); in btrfs_create_block_group_cache()
2207 init_rwsem(&cache->data_rwsem); in btrfs_create_block_group_cache()
2208 INIT_LIST_HEAD(&cache->list); in btrfs_create_block_group_cache()
2209 INIT_LIST_HEAD(&cache->cluster_list); in btrfs_create_block_group_cache()
2210 INIT_LIST_HEAD(&cache->bg_list); in btrfs_create_block_group_cache()
2211 INIT_LIST_HEAD(&cache->ro_list); in btrfs_create_block_group_cache()
2212 INIT_LIST_HEAD(&cache->discard_list); in btrfs_create_block_group_cache()
2213 INIT_LIST_HEAD(&cache->dirty_list); in btrfs_create_block_group_cache()
2214 INIT_LIST_HEAD(&cache->io_list); in btrfs_create_block_group_cache()
2215 INIT_LIST_HEAD(&cache->active_bg_list); in btrfs_create_block_group_cache()
2216 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); in btrfs_create_block_group_cache()
2217 atomic_set(&cache->frozen, 0); in btrfs_create_block_group_cache()
2218 mutex_init(&cache->free_space_lock); in btrfs_create_block_group_cache()
2220 return cache; in btrfs_create_block_group_cache()
2224 * Iterate all chunks and verify that each of them has the corresponding block
2245 bg = btrfs_lookup_block_group(fs_info, map->start); in check_chunk_block_group_mappings()
2248 "chunk start=%llu len=%llu doesn't have corresponding block group", in check_chunk_block_group_mappings()
2249 map->start, map->chunk_len); in check_chunk_block_group_mappings()
2250 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2254 if (bg->start != map->start || bg->length != map->chunk_len || in check_chunk_block_group_mappings()
2255 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != in check_chunk_block_group_mappings()
2256 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in check_chunk_block_group_mappings()
2258 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", in check_chunk_block_group_mappings()
2259 map->start, map->chunk_len, in check_chunk_block_group_mappings()
2260 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, in check_chunk_block_group_mappings()
2261 bg->start, bg->length, in check_chunk_block_group_mappings()
2262 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); in check_chunk_block_group_mappings()
2263 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2268 start = map->start + map->chunk_len; in check_chunk_block_group_mappings()
2280 struct btrfs_block_group *cache; in read_one_block_group() local
2284 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); in read_one_block_group()
2286 cache = btrfs_create_block_group_cache(info, key->objectid); in read_one_block_group()
2287 if (!cache) in read_one_block_group()
2288 return -ENOMEM; in read_one_block_group()
2290 cache->length = key->offset; in read_one_block_group()
2291 cache->used = btrfs_stack_block_group_used(bgi); in read_one_block_group()
2292 cache->commit_used = cache->used; in read_one_block_group()
2293 cache->flags = btrfs_stack_block_group_flags(bgi); in read_one_block_group()
2294 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); in read_one_block_group()
2296 set_free_space_tree_thresholds(cache); in read_one_block_group()
2300 * When we mount with old space cache, we need to in read_one_block_group()
2304 * truncate the old free space cache inode and in read_one_block_group()
2307 * the new space cache info onto disk. in read_one_block_group()
2310 cache->disk_cache_state = BTRFS_DC_CLEAR; in read_one_block_group()
2312 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && in read_one_block_group()
2313 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { in read_one_block_group()
2315 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", in read_one_block_group()
2316 cache->start); in read_one_block_group()
2317 ret = -EINVAL; in read_one_block_group()
2321 ret = btrfs_load_block_group_zone_info(cache, false); in read_one_block_group()
2324 cache->start); in read_one_block_group()
2333 ret = exclude_super_stripes(cache); in read_one_block_group()
2336 btrfs_free_excluded_extents(cache); in read_one_block_group()
2342 * free space for a block group. So, we don't need any caching work. in read_one_block_group()
2353 btrfs_calc_zone_unusable(cache); in read_one_block_group()
2355 btrfs_free_excluded_extents(cache); in read_one_block_group()
2356 } else if (cache->length == cache->used) { in read_one_block_group()
2357 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2358 btrfs_free_excluded_extents(cache); in read_one_block_group()
2359 } else if (cache->used == 0) { in read_one_block_group()
2360 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2361 ret = btrfs_add_new_free_space(cache, cache->start, in read_one_block_group()
2362 cache->start + cache->length, NULL); in read_one_block_group()
2363 btrfs_free_excluded_extents(cache); in read_one_block_group()
2368 ret = btrfs_add_block_group_cache(info, cache); in read_one_block_group()
2370 btrfs_remove_free_space_cache(cache); in read_one_block_group()
2373 trace_btrfs_add_block_group(info, cache, 0); in read_one_block_group()
2374 btrfs_add_bg_to_space_info(info, cache); in read_one_block_group()
2376 set_avail_alloc_bits(info, cache->flags); in read_one_block_group()
2377 if (btrfs_chunk_writeable(info, cache->start)) { in read_one_block_group()
2378 if (cache->used == 0) { in read_one_block_group()
2379 ASSERT(list_empty(&cache->bg_list)); in read_one_block_group()
2381 btrfs_discard_queue_work(&info->discard_ctl, cache); in read_one_block_group()
2383 btrfs_mark_bg_unused(cache); in read_one_block_group()
2386 inc_block_group_ro(cache, 1); in read_one_block_group()
2391 btrfs_put_block_group(cache); in read_one_block_group()
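/*
 * A note on the branches above (not part of the kernel source): they encode
 * two shortcuts for free space caching. A block group whose used bytes equal
 * its length has no free space to record, and a completely unused one can
 * have its whole [start, start + length) range added as free space right
 * away via btrfs_add_new_free_space(); only partially used block groups need
 * the slower caching work to run later.
 */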
2400 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { in fill_dummy_bgs()
2405 bg = btrfs_create_block_group_cache(fs_info, map->start); in fill_dummy_bgs()
2407 ret = -ENOMEM; in fill_dummy_bgs()
2411 /* Fill dummy cache as FULL */ in fill_dummy_bgs()
2412 bg->length = map->chunk_len; in fill_dummy_bgs()
2413 bg->flags = map->type; in fill_dummy_bgs()
2414 bg->cached = BTRFS_CACHE_FINISHED; in fill_dummy_bgs()
2415 bg->used = map->chunk_len; in fill_dummy_bgs()
2416 bg->flags = map->type; in fill_dummy_bgs()
2419 * We may have some valid block group cache added already, in in fill_dummy_bgs()
2422 if (ret == -EEXIST) { in fill_dummy_bgs()
2436 set_avail_alloc_bits(fs_info, bg->flags); in fill_dummy_bgs()
2448 struct btrfs_block_group *cache; in btrfs_read_block_groups() local
2456 * unsupported RO options. The fs can never be mounted read-write, so no in btrfs_read_block_groups()
2457 * need to waste time searching block group items. in btrfs_read_block_groups()
2462 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & in btrfs_read_block_groups()
2471 return -ENOMEM; in btrfs_read_block_groups()
2473 cache_gen = btrfs_super_cache_generation(info->super_copy); in btrfs_read_block_groups()
2475 btrfs_super_generation(info->super_copy) != cache_gen) in btrfs_read_block_groups()
2491 leaf = path->nodes[0]; in btrfs_read_block_groups()
2492 slot = path->slots[0]; in btrfs_read_block_groups()
2507 list_for_each_entry(space_info, &info->space_info, list) { in btrfs_read_block_groups()
2508 int i; in btrfs_read_block_groups() local
2510 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { in btrfs_read_block_groups()
2511 if (list_empty(&space_info->block_groups[i])) in btrfs_read_block_groups()
2513 cache = list_first_entry(&space_info->block_groups[i], in btrfs_read_block_groups()
2516 btrfs_sysfs_add_block_group_type(cache); in btrfs_read_block_groups()
2519 if (!(btrfs_get_alloc_profile(info, space_info->flags) & in btrfs_read_block_groups()
2526 * Avoid allocating from un-mirrored block group if there are in btrfs_read_block_groups()
2527 * mirrored block groups. in btrfs_read_block_groups()
2529 list_for_each_entry(cache, in btrfs_read_block_groups()
2530 &space_info->block_groups[BTRFS_RAID_RAID0], in btrfs_read_block_groups()
2532 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2533 list_for_each_entry(cache, in btrfs_read_block_groups()
2534 &space_info->block_groups[BTRFS_RAID_SINGLE], in btrfs_read_block_groups()
2536 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2546 * Try to fill the tree using dummy block groups so that the user can in btrfs_read_block_groups()
2564 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_block_group_item()
2571 spin_lock(&block_group->lock); in insert_block_group_item()
2572 btrfs_set_stack_block_group_used(&bgi, block_group->used); in insert_block_group_item()
2574 block_group->global_root_id); in insert_block_group_item()
2575 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); in insert_block_group_item()
2576 old_commit_used = block_group->commit_used; in insert_block_group_item()
2577 block_group->commit_used = block_group->used; in insert_block_group_item()
2578 key.objectid = block_group->start; in insert_block_group_item()
2580 key.offset = block_group->length; in insert_block_group_item()
2581 spin_unlock(&block_group->lock); in insert_block_group_item()
2585 spin_lock(&block_group->lock); in insert_block_group_item()
2586 block_group->commit_used = old_commit_used; in insert_block_group_item()
2587 spin_unlock(&block_group->lock); in insert_block_group_item()
2597 struct btrfs_fs_info *fs_info = device->fs_info; in insert_dev_extent()
2598 struct btrfs_root *root = fs_info->dev_root; in insert_dev_extent()
2605 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); in insert_dev_extent()
2606 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); in insert_dev_extent()
2609 return -ENOMEM; in insert_dev_extent()
2611 key.objectid = device->devid; in insert_dev_extent()
2618 leaf = path->nodes[0]; in insert_dev_extent()
2619 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); in insert_dev_extent()
2641 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_dev_extents()
2645 int i; in insert_dev_extents() local
2657 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the in insert_dev_extents()
2661 mutex_lock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2662 for (i = 0; i < map->num_stripes; i++) { in insert_dev_extents()
2663 device = map->stripes[i].dev; in insert_dev_extents()
2664 dev_offset = map->stripes[i].physical; in insert_dev_extents()
2667 map->stripe_size); in insert_dev_extents()
2671 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
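/*
 * Illustration, not from the kernel source: the loop above creates one dev
 * extent item per stripe of the new chunk. For example, a chunk mapped as
 * two mirrored stripes yields two struct btrfs_dev_extent items, one per
 * device, each keyed by its device id and physical offset and covering
 * map->stripe_size bytes.
 */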
2686 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_create_pending_block_groups()
2690 while (!list_empty(&trans->new_bgs)) { in btrfs_create_pending_block_groups()
2693 block_group = list_first_entry(&trans->new_bgs, in btrfs_create_pending_block_groups()
2699 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_create_pending_block_groups()
2705 &block_group->runtime_flags)) { in btrfs_create_pending_block_groups()
2706 mutex_lock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2708 mutex_unlock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2712 ret = insert_dev_extents(trans, block_group->start, in btrfs_create_pending_block_groups()
2713 block_group->length); in btrfs_create_pending_block_groups()
2724 if (block_group->space_info->block_group_kobjs[index] == NULL) in btrfs_create_pending_block_groups()
2730 list_del_init(&block_group->bg_list); in btrfs_create_pending_block_groups()
2731 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); in btrfs_create_pending_block_groups()
2734 * If the block group is still unused, add it to the list of in btrfs_create_pending_block_groups()
2735 * unused block groups. The block group may have been created in in btrfs_create_pending_block_groups()
2739 * so the block group may become unused for a long time. For in btrfs_create_pending_block_groups()
2748 * uncompressed data size, because the compression is only done in btrfs_create_pending_block_groups()
2751 	 * size because the data may be incompressible in the worst case. in btrfs_create_pending_block_groups()
2756 spin_lock(&block_group->lock); in btrfs_create_pending_block_groups()
2758 spin_unlock(&block_group->lock); in btrfs_create_pending_block_groups()
2768 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2780 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) in calculate_global_root_id()
2784 div64_u64_rem(offset, fs_info->nr_global_roots, &index); in calculate_global_root_id()
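/*
 * A minimal sketch, not kernel code, of how a chunk offset appears to be
 * spread across the global roots. The granularity parameter is an
 * assumption: the visible 10 GiB check above suggests a smaller bucket size
 * is used for small filesystems, and the division producing the bucket index
 * is on lines not shown here. u64 is the kernel type used throughout this
 * file.
 */
static u64 demo_global_root_id(u64 offset, u64 granularity, u64 nr_global_roots)
{
	u64 bucket = offset / granularity;	/* e.g. one bucket per 1 GiB */

	return nr_global_roots ? bucket % nr_global_roots : 0;
}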
2790 u64 chunk_offset, u64 size) in btrfs_make_block_group() argument
2792 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_make_block_group()
2793 struct btrfs_block_group *cache; in btrfs_make_block_group() local
2798 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); in btrfs_make_block_group()
2799 if (!cache) in btrfs_make_block_group()
2800 return ERR_PTR(-ENOMEM); in btrfs_make_block_group()
2803 * Mark it as new before adding it to the rbtree of block groups or any in btrfs_make_block_group()
2807 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); in btrfs_make_block_group()
2809 cache->length = size; in btrfs_make_block_group()
2810 set_free_space_tree_thresholds(cache); in btrfs_make_block_group()
2811 cache->flags = type; in btrfs_make_block_group()
2812 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_make_block_group()
2813 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); in btrfs_make_block_group()
2816 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); in btrfs_make_block_group()
2818 ret = btrfs_load_block_group_zone_info(cache, true); in btrfs_make_block_group()
2820 btrfs_put_block_group(cache); in btrfs_make_block_group()
2824 ret = exclude_super_stripes(cache); in btrfs_make_block_group()
2827 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2828 btrfs_put_block_group(cache); in btrfs_make_block_group()
2832 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); in btrfs_make_block_group()
2833 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2835 btrfs_put_block_group(cache); in btrfs_make_block_group()
2841 * assigned to our block group. We want our bg to be added to the rbtree in btrfs_make_block_group()
2842 * with its ->space_info set. in btrfs_make_block_group()
2844 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); in btrfs_make_block_group()
2845 ASSERT(cache->space_info); in btrfs_make_block_group()
2847 ret = btrfs_add_block_group_cache(fs_info, cache); in btrfs_make_block_group()
2849 btrfs_remove_free_space_cache(cache); in btrfs_make_block_group()
2850 btrfs_put_block_group(cache); in btrfs_make_block_group()
2855 * Now that our block group has its ->space_info set and is inserted in in btrfs_make_block_group()
2858 trace_btrfs_add_block_group(fs_info, cache, 1); in btrfs_make_block_group()
2859 btrfs_add_bg_to_space_info(fs_info, cache); in btrfs_make_block_group()
2863 if (btrfs_should_fragment_free_space(cache)) { in btrfs_make_block_group()
2864 cache->space_info->bytes_used += size >> 1; in btrfs_make_block_group()
2865 fragment_free_space(cache); in btrfs_make_block_group()
2869 list_add_tail(&cache->bg_list, &trans->new_bgs); in btrfs_make_block_group()
2873 return cache; in btrfs_make_block_group()
2877 	 * Mark one block group RO; this can be called several times for the same block in btrfs_inc_block_group_ro()
2880 * @cache: the destination block group
2881 	 * @do_chunk_alloc: whether we need to do chunk pre-allocation, this is to in btrfs_inc_block_group_ro()
2883 * block group RO.
2885 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, in btrfs_inc_block_group_ro() argument
2888 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_inc_block_group_ro()
2896 * This can only happen when we are doing read-only scrub on read-only in btrfs_inc_block_group_ro()
2898 * In that case we should not start a new transaction on read-only fs. in btrfs_inc_block_group_ro()
2901 if (sb_rdonly(fs_info->sb)) { in btrfs_inc_block_group_ro()
2902 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2903 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2904 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2916 * We're not allowed to set block groups readonly after the dirty in btrfs_inc_block_group_ro()
2917 * block group cache has started writing. If it already started, in btrfs_inc_block_group_ro()
2920 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2921 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { in btrfs_inc_block_group_ro()
2922 u64 transid = trans->transid; in btrfs_inc_block_group_ro()
2924 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2937 * corresponding block group with the new raid level. in btrfs_inc_block_group_ro()
2939 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2940 if (alloc_flags != cache->flags) { in btrfs_inc_block_group_ro()
2947 if (ret == -ENOSPC) in btrfs_inc_block_group_ro()
2954 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2957 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2963 * we still want to try our best to mark the block group read-only. in btrfs_inc_block_group_ro()
2965 if (!do_chunk_alloc && ret == -ENOSPC && in btrfs_inc_block_group_ro()
2966 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) in btrfs_inc_block_group_ro()
2969 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); in btrfs_inc_block_group_ro()
2977 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); in btrfs_inc_block_group_ro()
2981 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2982 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2985 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { in btrfs_inc_block_group_ro()
2986 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2987 mutex_lock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2989 mutex_unlock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2992 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2998 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) in btrfs_dec_block_group_ro() argument
3000 struct btrfs_space_info *sinfo = cache->space_info; in btrfs_dec_block_group_ro()
3003 BUG_ON(!cache->ro); in btrfs_dec_block_group_ro()
3005 spin_lock(&sinfo->lock); in btrfs_dec_block_group_ro()
3006 spin_lock(&cache->lock); in btrfs_dec_block_group_ro()
3007 if (!--cache->ro) { in btrfs_dec_block_group_ro()
3008 if (btrfs_is_zoned(cache->fs_info)) { in btrfs_dec_block_group_ro()
3010 cache->zone_unusable = in btrfs_dec_block_group_ro()
3011 (cache->alloc_offset - cache->used) + in btrfs_dec_block_group_ro()
3012 (cache->length - cache->zone_capacity); in btrfs_dec_block_group_ro()
3013 sinfo->bytes_zone_unusable += cache->zone_unusable; in btrfs_dec_block_group_ro()
3014 sinfo->bytes_readonly -= cache->zone_unusable; in btrfs_dec_block_group_ro()
3016 num_bytes = cache->length - cache->reserved - in btrfs_dec_block_group_ro()
3017 cache->pinned - cache->bytes_super - in btrfs_dec_block_group_ro()
3018 cache->zone_unusable - cache->used; in btrfs_dec_block_group_ro()
3019 sinfo->bytes_readonly -= num_bytes; in btrfs_dec_block_group_ro()
3020 list_del_init(&cache->ro_list); in btrfs_dec_block_group_ro()
3022 spin_unlock(&cache->lock); in btrfs_dec_block_group_ro()
3023 spin_unlock(&sinfo->lock); in btrfs_dec_block_group_ro()
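/*
 * A worked example for the zoned branch above, with hypothetical numbers:
 * for length = 256M, zone_capacity = 192M, alloc_offset = 128M and
 * used = 96M, dropping the last read-only reference recomputes
 *   zone_unusable = (128M - 96M) + (256M - 192M) = 96M
 * and moves that amount from bytes_readonly back to bytes_zone_unusable. In
 * the non-zoned branch, bytes_readonly simply shrinks by whatever is neither
 * reserved, pinned, super-reserved, zone-unusable nor used.
 */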
3028 struct btrfs_block_group *cache) in update_block_group_item() argument
3030 struct btrfs_fs_info *fs_info = trans->fs_info; in update_block_group_item()
3041 	 * Block group item updates can be triggered outside of the commit transaction in update_block_group_item()
3043 * We cannot use cache->used directly outside of the spin lock, as it in update_block_group_item()
3046 spin_lock(&cache->lock); in update_block_group_item()
3047 old_commit_used = cache->commit_used; in update_block_group_item()
3048 used = cache->used; in update_block_group_item()
3050 if (cache->commit_used == used) { in update_block_group_item()
3051 spin_unlock(&cache->lock); in update_block_group_item()
3054 cache->commit_used = used; in update_block_group_item()
3055 spin_unlock(&cache->lock); in update_block_group_item()
3057 key.objectid = cache->start; in update_block_group_item()
3059 key.offset = cache->length; in update_block_group_item()
3064 ret = -ENOENT; in update_block_group_item()
3068 leaf = path->nodes[0]; in update_block_group_item()
3069 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); in update_block_group_item()
3072 cache->global_root_id); in update_block_group_item()
3073 btrfs_set_stack_block_group_flags(&bgi, cache->flags); in update_block_group_item()
3079 * We didn't update the block group item, need to revert commit_used in update_block_group_item()
3080 * unless the block group item didn't exist yet - this is to prevent a in update_block_group_item()
3081 * race with a concurrent insertion of the block group item, with in update_block_group_item()
3084 * insertion set it to a value greater than 0 - if the block group later in update_block_group_item()
3087 if (ret < 0 && ret != -ENOENT) { in update_block_group_item()
3088 spin_lock(&cache->lock); in update_block_group_item()
3089 cache->commit_used = old_commit_used; in update_block_group_item()
3090 spin_unlock(&cache->lock); in update_block_group_item()
3100 struct btrfs_fs_info *fs_info = block_group->fs_info; in cache_save_setup()
3113 	 * If this block group is smaller than 100 megs, don't bother caching the in cache_save_setup()
3114 * block group. in cache_save_setup()
3116 if (block_group->length < (100 * SZ_1M)) { in cache_save_setup()
3117 spin_lock(&block_group->lock); in cache_save_setup()
3118 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in cache_save_setup()
3119 spin_unlock(&block_group->lock); in cache_save_setup()
3127 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { in cache_save_setup()
3137 if (block_group->ro) in cache_save_setup()
3148 * from here on out we know not to trust this cache when we load up next in cache_save_setup()
3151 BTRFS_I(inode)->generation = 0; in cache_save_setup()
3156 * super cache generation to 0 so we know to invalidate the in cache_save_setup()
3157 * cache, but then we'd have to keep track of the block groups in cache_save_setup()
3158 * that fail this way so we know we _have_ to reset this cache in cache_save_setup()
3159 * before the next commit or risk reading stale cache. So to in cache_save_setup()
3170 if (block_group->cache_generation == trans->transid && in cache_save_setup()
3178 &fs_info->global_block_rsv); in cache_save_setup()
3187 spin_lock(&block_group->lock); in cache_save_setup()
3188 if (block_group->cached != BTRFS_CACHE_FINISHED || in cache_save_setup()
3197 spin_unlock(&block_group->lock); in cache_save_setup()
3200 spin_unlock(&block_group->lock); in cache_save_setup()
3203 	 * We hit an ENOSPC when setting up the cache in this transaction, so just in cache_save_setup()
3204 	 * skip doing the setup; we've already cleared the cache, so we're safe. in cache_save_setup()
3206 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { in cache_save_setup()
3207 ret = -ENOSPC; in cache_save_setup()
3212 * Try to preallocate enough space based on how big the block group is. in cache_save_setup()
3215 * cache. in cache_save_setup()
3217 cache_size = div_u64(block_group->length, SZ_256M); in cache_save_setup()
3222 cache_size *= fs_info->sectorsize; in cache_save_setup()
3233 * Our cache requires contiguous chunks so that we don't modify a bunch in cache_save_setup()
3234 * of metadata or split extents when writing the cache out, which means in cache_save_setup()
3237 * other block groups for this transaction, maybe we'll unpin enough in cache_save_setup()
3242 else if (ret == -ENOSPC) in cache_save_setup()
3243 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); in cache_save_setup()
3250 spin_lock(&block_group->lock); in cache_save_setup()
3252 block_group->cache_generation = trans->transid; in cache_save_setup()
3253 block_group->disk_cache_state = dcs; in cache_save_setup()
3254 spin_unlock(&block_group->lock); in cache_save_setup()
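/*
 * A summary of the disk_cache_state values seen above, partly inferred from
 * context outside the matched lines: BTRFS_DC_CLEAR marks a v1 space cache
 * that must be invalidated, BTRFS_DC_SETUP means cache_save_setup() reserved
 * space so the cache can be written this transaction, and BTRFS_DC_WRITTEN
 * means the cache was written or deliberately skipped, as for block groups
 * under 100M above.
 */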
3262 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_setup_space_cache()
3263 struct btrfs_block_group *cache, *tmp; in btrfs_setup_space_cache() local
3264 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_setup_space_cache()
3267 if (list_empty(&cur_trans->dirty_bgs) || in btrfs_setup_space_cache()
3273 return -ENOMEM; in btrfs_setup_space_cache()
3275 /* Could add new block groups, use _safe just in case */ in btrfs_setup_space_cache()
3276 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, in btrfs_setup_space_cache()
3278 if (cache->disk_cache_state == BTRFS_DC_CLEAR) in btrfs_setup_space_cache()
3279 cache_save_setup(cache, trans, path); in btrfs_setup_space_cache()
3287 * Transaction commit does final block group cache writeback during a critical
3289 * order for the cache to actually match the block group, but can introduce a
3292 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3293 * There's a chance we'll have to redo some of it if the block group changes
3295 * getting rid of the easy block groups while we're still allowing others to
3300 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_start_dirty_block_groups()
3301 struct btrfs_block_group *cache; in btrfs_start_dirty_block_groups() local
3302 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_start_dirty_block_groups()
3307 struct list_head *io = &cur_trans->io_bgs; in btrfs_start_dirty_block_groups()
3310 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3311 if (list_empty(&cur_trans->dirty_bgs)) { in btrfs_start_dirty_block_groups()
3312 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3315 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
3316 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3319 /* Make sure all the block groups on our dirty list actually exist */ in btrfs_start_dirty_block_groups()
3325 ret = -ENOMEM; in btrfs_start_dirty_block_groups()
3332 * removal of empty block groups deleting this block group while we are in btrfs_start_dirty_block_groups()
3333 * writing out the cache in btrfs_start_dirty_block_groups()
3335 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3339 cache = list_first_entry(&dirty, struct btrfs_block_group, in btrfs_start_dirty_block_groups()
3342 * This can happen if something re-dirties a block group that in btrfs_start_dirty_block_groups()
3346 if (!list_empty(&cache->io_list)) { in btrfs_start_dirty_block_groups()
3347 list_del_init(&cache->io_list); in btrfs_start_dirty_block_groups()
3348 btrfs_wait_cache_io(trans, cache, path); in btrfs_start_dirty_block_groups()
3349 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
3354 * btrfs_wait_cache_io uses the cache->dirty_list to decide if in btrfs_start_dirty_block_groups()
3361 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3362 list_del_init(&cache->dirty_list); in btrfs_start_dirty_block_groups()
3363 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3367 cache_save_setup(cache, trans, path); in btrfs_start_dirty_block_groups()
3369 if (cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_start_dirty_block_groups()
3370 cache->io_ctl.inode = NULL; in btrfs_start_dirty_block_groups()
3371 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_start_dirty_block_groups()
3372 if (ret == 0 && cache->io_ctl.inode) { in btrfs_start_dirty_block_groups()
3380 list_add_tail(&cache->io_list, io); in btrfs_start_dirty_block_groups()
3383 * If we failed to write the cache, the in btrfs_start_dirty_block_groups()
3390 ret = update_block_group_item(trans, path, cache); in btrfs_start_dirty_block_groups()
3392 * Our block group might still be attached to the list in btrfs_start_dirty_block_groups()
3393 * of new block groups in the transaction handle of some in btrfs_start_dirty_block_groups()
3394 * other task (struct btrfs_trans_handle->new_bgs). This in btrfs_start_dirty_block_groups()
3395 * means its block group item isn't yet in the extent in btrfs_start_dirty_block_groups()
3400 if (ret == -ENOENT) { in btrfs_start_dirty_block_groups()
3402 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3403 if (list_empty(&cache->dirty_list)) { in btrfs_start_dirty_block_groups()
3404 list_add_tail(&cache->dirty_list, in btrfs_start_dirty_block_groups()
3405 &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
3406 btrfs_get_block_group(cache); in btrfs_start_dirty_block_groups()
3409 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3415 /* If it's not on the io list, we need to put the block group */ in btrfs_start_dirty_block_groups()
3417 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
3422 * us from writing caches for block groups that are going to be in btrfs_start_dirty_block_groups()
3425 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3428 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3430 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3440 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3441 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
3443 * dirty_bgs_lock protects us from concurrent block group in btrfs_start_dirty_block_groups()
3447 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3450 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3454 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3455 list_splice_init(&dirty, &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
3456 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3466 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_dirty_block_groups()
3467 struct btrfs_block_group *cache; in btrfs_write_dirty_block_groups() local
3468 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_write_dirty_block_groups()
3472 struct list_head *io = &cur_trans->io_bgs; in btrfs_write_dirty_block_groups()
3476 return -ENOMEM; in btrfs_write_dirty_block_groups()
3481 * transaction's list of dirty block groups. These tasks correspond to in btrfs_write_dirty_block_groups()
3483 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can in btrfs_write_dirty_block_groups()
3484 * allocate new block groups as a result of COWing nodes of the root in btrfs_write_dirty_block_groups()
3493 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3494 while (!list_empty(&cur_trans->dirty_bgs)) { in btrfs_write_dirty_block_groups()
3495 cache = list_first_entry(&cur_trans->dirty_bgs, in btrfs_write_dirty_block_groups()
3500 * This can happen if cache_save_setup re-dirties a block group in btrfs_write_dirty_block_groups()
3504 if (!list_empty(&cache->io_list)) { in btrfs_write_dirty_block_groups()
3505 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3506 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
3507 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
3508 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3509 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3516 list_del_init(&cache->dirty_list); in btrfs_write_dirty_block_groups()
3517 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3520 cache_save_setup(cache, trans, path); in btrfs_write_dirty_block_groups()
3525 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_write_dirty_block_groups()
3526 cache->io_ctl.inode = NULL; in btrfs_write_dirty_block_groups()
3527 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_write_dirty_block_groups()
3528 if (ret == 0 && cache->io_ctl.inode) { in btrfs_write_dirty_block_groups()
3530 list_add_tail(&cache->io_list, io); in btrfs_write_dirty_block_groups()
3533 * If we failed to write the cache, the in btrfs_write_dirty_block_groups()
3540 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
3543 * created a new block group while updating a free space in btrfs_write_dirty_block_groups()
3544 * cache's inode (at inode.c:btrfs_finish_ordered_io()) in btrfs_write_dirty_block_groups()
3546 * which case the new block group is still attached to in btrfs_write_dirty_block_groups()
3548 * finished yet (no block group item in the extent tree in btrfs_write_dirty_block_groups()
3554 if (ret == -ENOENT) { in btrfs_write_dirty_block_groups()
3555 wait_event(cur_trans->writer_wait, in btrfs_write_dirty_block_groups()
3556 atomic_read(&cur_trans->num_writers) == 1); in btrfs_write_dirty_block_groups()
3557 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
3563 		/* If it's not on the io list, we need to put the block group */ in btrfs_write_dirty_block_groups()
3565 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3567 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3569 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3576 cache = list_first_entry(io, struct btrfs_block_group, in btrfs_write_dirty_block_groups()
3578 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
3579 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
3580 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3590 struct btrfs_fs_info *info = trans->fs_info; in btrfs_update_block_group()
3592 struct btrfs_block_group *cache; in btrfs_update_block_group() local
3598 /* Block accounting for super block */ in btrfs_update_block_group()
3599 spin_lock(&info->delalloc_root_lock); in btrfs_update_block_group()
3600 old_val = btrfs_super_bytes_used(info->super_copy); in btrfs_update_block_group()
3604 old_val -= num_bytes; in btrfs_update_block_group()
3605 btrfs_set_super_bytes_used(info->super_copy, old_val); in btrfs_update_block_group()
3606 spin_unlock(&info->delalloc_root_lock); in btrfs_update_block_group()
3608 cache = btrfs_lookup_block_group(info, bytenr); in btrfs_update_block_group()
3609 if (!cache) in btrfs_update_block_group()
3610 return -ENOENT; in btrfs_update_block_group()
3612 	/* An extent cannot span multiple block groups. */ in btrfs_update_block_group()
3613 ASSERT(bytenr + num_bytes <= cache->start + cache->length); in btrfs_update_block_group()
3615 space_info = cache->space_info; in btrfs_update_block_group()
3616 factor = btrfs_bg_type_to_factor(cache->flags); in btrfs_update_block_group()
3619 * If this block group has free space cache written out, we need to make in btrfs_update_block_group()
3621 * the unpinning stage to actually add the space back to the block group, in btrfs_update_block_group()
3624 if (!alloc && !btrfs_block_group_done(cache)) in btrfs_update_block_group()
3625 btrfs_cache_block_group(cache, true); in btrfs_update_block_group()
3627 spin_lock(&space_info->lock); in btrfs_update_block_group()
3628 spin_lock(&cache->lock); in btrfs_update_block_group()
3631 cache->disk_cache_state < BTRFS_DC_CLEAR) in btrfs_update_block_group()
3632 cache->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_update_block_group()
3634 old_val = cache->used; in btrfs_update_block_group()
3637 cache->used = old_val; in btrfs_update_block_group()
3638 cache->reserved -= num_bytes; in btrfs_update_block_group()
3639 space_info->bytes_reserved -= num_bytes; in btrfs_update_block_group()
3640 space_info->bytes_used += num_bytes; in btrfs_update_block_group()
3641 space_info->disk_used += num_bytes * factor; in btrfs_update_block_group()
3642 spin_unlock(&cache->lock); in btrfs_update_block_group()
3643 spin_unlock(&space_info->lock); in btrfs_update_block_group()
3645 old_val -= num_bytes; in btrfs_update_block_group()
3646 cache->used = old_val; in btrfs_update_block_group()
3647 cache->pinned += num_bytes; in btrfs_update_block_group()
3649 space_info->bytes_used -= num_bytes; in btrfs_update_block_group()
3650 space_info->disk_used -= num_bytes * factor; in btrfs_update_block_group()
3652 reclaim = should_reclaim_block_group(cache, num_bytes); in btrfs_update_block_group()
3654 spin_unlock(&cache->lock); in btrfs_update_block_group()
3655 spin_unlock(&space_info->lock); in btrfs_update_block_group()
3657 set_extent_bit(&trans->transaction->pinned_extents, bytenr, in btrfs_update_block_group()
3658 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); in btrfs_update_block_group()
3661 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
3662 if (list_empty(&cache->dirty_list)) { in btrfs_update_block_group()
3663 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs); in btrfs_update_block_group()
3665 btrfs_get_block_group(cache); in btrfs_update_block_group()
3667 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
3670 * No longer have used bytes in this block group, queue it for deletion. in btrfs_update_block_group()
3671 * We do this after adding the block group to the dirty list to avoid in btrfs_update_block_group()
3672 * races between cleaner kthread and space cache writeout. in btrfs_update_block_group()
3676 btrfs_mark_bg_unused(cache); in btrfs_update_block_group()
3678 btrfs_mark_bg_to_reclaim(cache); in btrfs_update_block_group()
3681 btrfs_put_block_group(cache); in btrfs_update_block_group()
3683 /* Modified block groups are accounted for in the delayed_refs_rsv. */ in btrfs_update_block_group()
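/*
 * A worked example for the accounting above, with a hypothetical 1M extent
 * and a single profile so that factor == 1: on allocation, 1M moves from
 * cache->reserved to cache->used and from space_info->bytes_reserved to
 * bytes_used (and disk_used); on freeing, 1M moves from cache->used to
 * cache->pinned and the range is recorded in the transaction's
 * pinned_extents, so it only becomes allocatable again once the transaction
 * commits and pinned extents are unpinned.
 */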
3693 * @cache: The cache we are manipulating
3700 * reservation and the block group has become read only we cannot make the
3701 * reservation and return -EAGAIN, otherwise this function always succeeds.
3703 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, in btrfs_add_reserved_bytes() argument
3707 struct btrfs_space_info *space_info = cache->space_info; in btrfs_add_reserved_bytes()
3711 spin_lock(&space_info->lock); in btrfs_add_reserved_bytes()
3712 spin_lock(&cache->lock); in btrfs_add_reserved_bytes()
3713 if (cache->ro) { in btrfs_add_reserved_bytes()
3714 ret = -EAGAIN; in btrfs_add_reserved_bytes()
3718 if (btrfs_block_group_should_use_size_class(cache)) { in btrfs_add_reserved_bytes()
3720 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); in btrfs_add_reserved_bytes()
3724 cache->reserved += num_bytes; in btrfs_add_reserved_bytes()
3725 space_info->bytes_reserved += num_bytes; in btrfs_add_reserved_bytes()
3726 trace_btrfs_space_reservation(cache->fs_info, "space_info", in btrfs_add_reserved_bytes()
3727 space_info->flags, num_bytes, 1); in btrfs_add_reserved_bytes()
3728 btrfs_space_info_update_bytes_may_use(cache->fs_info, in btrfs_add_reserved_bytes()
3729 space_info, -ram_bytes); in btrfs_add_reserved_bytes()
3731 cache->delalloc_bytes += num_bytes; in btrfs_add_reserved_bytes()
3738 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_add_reserved_bytes()
3740 spin_unlock(&cache->lock); in btrfs_add_reserved_bytes()
3741 spin_unlock(&space_info->lock); in btrfs_add_reserved_bytes()
3748 * @cache: The cache we are manipulating
3757 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, in btrfs_free_reserved_bytes() argument
3760 struct btrfs_space_info *space_info = cache->space_info; in btrfs_free_reserved_bytes()
3762 spin_lock(&space_info->lock); in btrfs_free_reserved_bytes()
3763 spin_lock(&cache->lock); in btrfs_free_reserved_bytes()
3764 if (cache->ro) in btrfs_free_reserved_bytes()
3765 space_info->bytes_readonly += num_bytes; in btrfs_free_reserved_bytes()
3766 cache->reserved -= num_bytes; in btrfs_free_reserved_bytes()
3767 space_info->bytes_reserved -= num_bytes; in btrfs_free_reserved_bytes()
3768 space_info->max_extent_size = 0; in btrfs_free_reserved_bytes()
3771 cache->delalloc_bytes -= num_bytes; in btrfs_free_reserved_bytes()
3772 spin_unlock(&cache->lock); in btrfs_free_reserved_bytes()
3774 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_free_reserved_bytes()
3775 spin_unlock(&space_info->lock); in btrfs_free_reserved_bytes()
3780 struct list_head *head = &info->space_info; in force_metadata_allocation()
3784 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) in force_metadata_allocation()
3785 found->force_alloc = CHUNK_ALLOC_FORCE; in force_metadata_allocation()
3800 * about 1% of the FS size. in should_alloc_chunk()
3803 thresh = btrfs_super_total_bytes(fs_info->super_copy); in should_alloc_chunk()
3806 if (sinfo->total_bytes - bytes_used < thresh) in should_alloc_chunk()
3810 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) in should_alloc_chunk()
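/*
 * A standalone sketch of the two visible checks above, not the kernel
 * function: it takes the nearby comment at face value that the threshold
 * works out to about 1% of the filesystem size. The force-mode handling and
 * the clamping of the threshold are on lines not shown here and are ignored;
 * u64 and SZ_2M are the kernel definitions used elsewhere in this file.
 */
static int demo_should_alloc_chunk(u64 si_total, u64 si_used, u64 fs_bytes)
{
	u64 thresh = fs_bytes / 100;		/* roughly 1% of the FS size */

	if (si_total - si_used < thresh)	/* existing chunks nearly full */
		return 1;
	/* Divide before multiplying to stay clear of overflow at 80%. */
	if (si_used + SZ_2M < si_total / 100 * 80)
		return 0;
	return 1;
}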
3817 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); in btrfs_force_chunk_alloc()
3831 * system block group if needed. in do_chunk_alloc()
3843 * Normally we are not expected to fail with -ENOSPC here, since we have in do_chunk_alloc()
3848 	 * existing system block groups have a profile which cannot be used in do_chunk_alloc()
3855 * none of the block groups can be used for extent allocation since they in do_chunk_alloc()
3859 * block groups and check if they have a usable profile and enough space in do_chunk_alloc()
3860 * can be slow on very large filesystems, so we tolerate the -ENOSPC and in do_chunk_alloc()
3866 * block group to allocate from when we called check_system_chunk() in do_chunk_alloc()
3867 * above. However right after we called it, the only system block group in do_chunk_alloc()
3871 * handle and scrub uses the commit root to search for block groups; in do_chunk_alloc()
3873 * 3) We had one system block group with enough free space when we called in do_chunk_alloc()
3877 * block group (discard removes a free space entry, discards it, and in do_chunk_alloc()
3878 * then adds back the entry to the block group cache). in do_chunk_alloc()
3880 if (ret == -ENOSPC) { in do_chunk_alloc()
3881 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); in do_chunk_alloc()
3919 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3920 * the chunk, the chunk mapping, create its block group and add the items
3921 * that belong in the chunk btree to it - more specifically, we need to
3924 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3930 * trigger chunk allocation and attempted to insert the new block group item
3941 * allocate a new block group (chunk) because the only one that had enough
3943 * device replace, block group reclaim thread, etc), so we can not use it
3948 * the filesystem was mounted in degraded mode, none of the existing block
3950 * profile (for e.g. mounting a 2 devices filesystem, where all block groups
3953 * example, it will trigger allocation of a new metadata block group with a
3959 * example, it does not find any free extent in any metadata block group,
3960 * therefore forced to try to allocate a new metadata block group.
3962 * meanwhile - this typically happens with tasks that don't reserve space
3971 * the only metadata block group that had free space (discard starts by
3972 * removing a free space entry from a block group, then does the discard
3974 * block group).
3977 * a seed device - we must create new metadata and system chunks without adding
3978 * any of the block group items to the chunk, extent and device btrees. If we
3980 * btrees, since all the chunks from the seed device are read-only.
3987 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
3995 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
3997 * a modification to the chunk btree - use cases for the later are adding,
4003 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
4009 * that mutex. The same logic applies to removing chunks - we must reserve system
4011 * while holding fs_info->chunk_mutex.
4016 * - return 1 if it successfully allocates a chunk,
4017 * - return errors including -ENOSPC otherwise.
4019 * - return 0 if it doesn't need to allocate a new chunk,
4020 * - return 1 if it successfully allocates a chunk,
4021 * - return errors including -ENOSPC otherwise.
4026 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_chunk_alloc()
4039 /* Don't re-enter if we're already allocating a chunk */ in btrfs_chunk_alloc()
4040 if (trans->allocating_chunk) in btrfs_chunk_alloc()
4041 return -ENOSPC; in btrfs_chunk_alloc()
4051 * lock on it and on its parent - if the COW operation triggers a system in btrfs_chunk_alloc()
4060 * here - this happens in the cases described above at do_chunk_alloc(). in btrfs_chunk_alloc()
4064 return -ENOSPC; in btrfs_chunk_alloc()
4070 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
4071 if (force < space_info->force_alloc) in btrfs_chunk_alloc()
4072 force = space_info->force_alloc; in btrfs_chunk_alloc()
4074 if (space_info->full) { in btrfs_chunk_alloc()
4077 ret = -ENOSPC; in btrfs_chunk_alloc()
4080 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4083 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4085 } else if (space_info->chunk_alloc) { in btrfs_chunk_alloc()
4087 * Someone is already allocating, so we need to block in btrfs_chunk_alloc()
4094 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4095 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4096 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4099 space_info->chunk_alloc = 1; in btrfs_chunk_alloc()
4101 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4107 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4108 trans->allocating_chunk = true; in btrfs_chunk_alloc()
4122 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { in btrfs_chunk_alloc()
4123 fs_info->data_chunk_allocations++; in btrfs_chunk_alloc()
4124 if (!(fs_info->data_chunk_allocations % in btrfs_chunk_alloc()
4125 fs_info->metadata_ratio)) in btrfs_chunk_alloc()
4130 trans->allocating_chunk = false; in btrfs_chunk_alloc()
4136 * New block group is likely to be used soon. Try to activate in btrfs_chunk_alloc()
4145 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
4147 if (ret == -ENOSPC) in btrfs_chunk_alloc()
4148 space_info->full = 1; in btrfs_chunk_alloc()
4153 space_info->max_extent_size = 0; in btrfs_chunk_alloc()
4156 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in btrfs_chunk_alloc()
4158 space_info->chunk_alloc = 0; in btrfs_chunk_alloc()
4159 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4160 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
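/*
 * A usage note for the return convention documented before
 * btrfs_chunk_alloc(); the caller below is hypothetical, not taken from
 * this file:
 *
 *	ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)		// hard failure, including -ENOSPC
 *		return ret;
 *	else if (ret == 1)	// a new chunk was allocated
 *		...
 *	else			// ret == 0, no allocation was needed
 *		...
 */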
4171 num_dev = fs_info->fs_devices->rw_devices; in get_profile_num_devs()
4180 struct btrfs_fs_info *fs_info = trans->fs_info; in reserve_chunk_space()
4187 	 * atomic and race-free space reservation in the chunk block reserve. in reserve_chunk_space()
4189 lockdep_assert_held(&fs_info->chunk_mutex); in reserve_chunk_space()
4192 spin_lock(&info->lock); in reserve_chunk_space()
4193 left = info->total_bytes - btrfs_space_info_used(info, true); in reserve_chunk_space()
4194 spin_unlock(&info->lock); in reserve_chunk_space()
4229 * the cases described at do_chunk_alloc() - the system in reserve_chunk_space()
4230 * block group we just created was just turned into RO in reserve_chunk_space()
4240 &fs_info->chunk_block_rsv, in reserve_chunk_space()
4243 trans->chunk_bytes_reserved += bytes; in reserve_chunk_space()
4249 * The caller must be holding fs_info->chunk_mutex.
4253 struct btrfs_fs_info *fs_info = trans->fs_info; in check_system_chunk()
4274 * block group allocation and removal, to avoid a deadlock with a concurrent
4275 * task that is allocating a metadata or data block group and therefore needs to
4283 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_reserve_chunk_metadata()
4291 mutex_lock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
4293 mutex_unlock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
4303 spin_lock(&block_group->lock); in btrfs_put_block_group_cache()
4305 &block_group->runtime_flags)) { in btrfs_put_block_group_cache()
4306 struct inode *inode = block_group->inode; in btrfs_put_block_group_cache()
4308 block_group->inode = NULL; in btrfs_put_block_group_cache()
4309 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
4311 ASSERT(block_group->io_ctl.inode == NULL); in btrfs_put_block_group_cache()
4314 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
4321 * Must be called only after stopping all workers, since we could have block
4323 * freed the block groups before stopping them.
4333 if (info->active_meta_bg) { in btrfs_free_block_groups()
4334 btrfs_put_block_group(info->active_meta_bg); in btrfs_free_block_groups()
4335 info->active_meta_bg = NULL; in btrfs_free_block_groups()
4337 if (info->active_system_bg) { in btrfs_free_block_groups()
4338 btrfs_put_block_group(info->active_system_bg); in btrfs_free_block_groups()
4339 info->active_system_bg = NULL; in btrfs_free_block_groups()
4343 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4344 while (!list_empty(&info->caching_block_groups)) { in btrfs_free_block_groups()
4345 caching_ctl = list_entry(info->caching_block_groups.next, in btrfs_free_block_groups()
4347 list_del(&caching_ctl->list); in btrfs_free_block_groups()
4350 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4352 spin_lock(&info->unused_bgs_lock); in btrfs_free_block_groups()
4353 while (!list_empty(&info->unused_bgs)) { in btrfs_free_block_groups()
4354 block_group = list_first_entry(&info->unused_bgs, in btrfs_free_block_groups()
4357 list_del_init(&block_group->bg_list); in btrfs_free_block_groups()
4361 while (!list_empty(&info->reclaim_bgs)) { in btrfs_free_block_groups()
4362 block_group = list_first_entry(&info->reclaim_bgs, in btrfs_free_block_groups()
4365 list_del_init(&block_group->bg_list); in btrfs_free_block_groups()
4368 spin_unlock(&info->unused_bgs_lock); in btrfs_free_block_groups()
4370 spin_lock(&info->zone_active_bgs_lock); in btrfs_free_block_groups()
4371 while (!list_empty(&info->zone_active_bgs)) { in btrfs_free_block_groups()
4372 block_group = list_first_entry(&info->zone_active_bgs, in btrfs_free_block_groups()
4375 list_del_init(&block_group->active_bg_list); in btrfs_free_block_groups()
4378 spin_unlock(&info->zone_active_bgs_lock); in btrfs_free_block_groups()
4380 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4381 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { in btrfs_free_block_groups()
4384 rb_erase_cached(&block_group->cache_node, in btrfs_free_block_groups()
4385 &info->block_group_cache_tree); in btrfs_free_block_groups()
4386 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_free_block_groups()
4387 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4389 down_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
4390 list_del(&block_group->list); in btrfs_free_block_groups()
4391 up_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
4394 * We haven't cached this block group, which means we could in btrfs_free_block_groups()
4395 * possibly have excluded extents on this block group. in btrfs_free_block_groups()
4397 if (block_group->cached == BTRFS_CACHE_NO || in btrfs_free_block_groups()
4398 block_group->cached == BTRFS_CACHE_ERROR) in btrfs_free_block_groups()
4402 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); in btrfs_free_block_groups()
4403 ASSERT(list_empty(&block_group->dirty_list)); in btrfs_free_block_groups()
4404 ASSERT(list_empty(&block_group->io_list)); in btrfs_free_block_groups()
4405 ASSERT(list_empty(&block_group->bg_list)); in btrfs_free_block_groups()
4406 ASSERT(refcount_read(&block_group->refs) == 1); in btrfs_free_block_groups()
4407 ASSERT(block_group->swap_extents == 0); in btrfs_free_block_groups()
4410 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4412 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4416 while (!list_empty(&info->space_info)) { in btrfs_free_block_groups()
4417 space_info = list_entry(info->space_info.next, in btrfs_free_block_groups()
4425 if (WARN_ON(space_info->bytes_pinned > 0 || in btrfs_free_block_groups()
4426 space_info->bytes_may_use > 0)) in btrfs_free_block_groups()
4436 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_free_block_groups()
4438 if (WARN_ON(space_info->bytes_reserved > 0)) in btrfs_free_block_groups()
4442 WARN_ON(space_info->reclaim_size > 0); in btrfs_free_block_groups()
4443 list_del(&space_info->list); in btrfs_free_block_groups()
4449 void btrfs_freeze_block_group(struct btrfs_block_group *cache) in btrfs_freeze_block_group() argument
4451 atomic_inc(&cache->frozen); in btrfs_freeze_block_group()
4456 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_unfreeze_block_group()
4459 spin_lock(&block_group->lock); in btrfs_unfreeze_block_group()
4460 cleanup = (atomic_dec_and_test(&block_group->frozen) && in btrfs_unfreeze_block_group()
4461 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); in btrfs_unfreeze_block_group()
4462 spin_unlock(&block_group->lock); in btrfs_unfreeze_block_group()
4467 map = btrfs_find_chunk_map(fs_info, block_group->start, 1); in btrfs_unfreeze_block_group()
4478 	 * tasks trimming this block group have each left one entry. in btrfs_unfreeze_block_group()
4489 spin_lock(&bg->lock); in btrfs_inc_block_group_swap_extents()
4490 if (bg->ro) in btrfs_inc_block_group_swap_extents()
4493 bg->swap_extents++; in btrfs_inc_block_group_swap_extents()
4494 spin_unlock(&bg->lock); in btrfs_inc_block_group_swap_extents()
4501 spin_lock(&bg->lock); in btrfs_dec_block_group_swap_extents()
4502 ASSERT(!bg->ro); in btrfs_dec_block_group_swap_extents()
4503 ASSERT(bg->swap_extents >= amount); in btrfs_dec_block_group_swap_extents()
4504 bg->swap_extents -= amount; in btrfs_dec_block_group_swap_extents()
4505 spin_unlock(&bg->lock); in btrfs_dec_block_group_swap_extents()
4508 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) in btrfs_calc_block_group_size_class() argument
4510 if (size <= SZ_128K) in btrfs_calc_block_group_size_class()
4512 if (size <= SZ_8M) in btrfs_calc_block_group_size_class()
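/*
 * An illustration of the thresholds above, not from the source: a 96K
 * allocation falls into the smallest size class, a 1M allocation into the
 * middle one, and anything larger than 8M (say a 32M data extent) into the
 * largest. The enum values returned on the elided lines are not reproduced
 * here.
 */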
4518 * Handle a block group allocating an extent in a size class
4520 * @bg: The block group we allocated in.
4521 * @size_class: The size class of the allocation.
4523 * mismatched size classes.
4525 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
4526 * case of a race that leads to the wrong size class without
4529 * find_free_extent will skip block groups with a mismatched size class until
4531 * force_wrong_size_class. However, if a block group is newly allocated and
4532 * doesn't yet have a size class, then it is possible for two allocations of
4542 /* The new allocation is in the right size class, do nothing */ in btrfs_use_block_group_size_class()
4543 if (bg->size_class == size_class) in btrfs_use_block_group_size_class()
4546 * The new allocation is in a mismatched size class. in btrfs_use_block_group_size_class()
4555 if (bg->size_class != BTRFS_BG_SZ_NONE) { in btrfs_use_block_group_size_class()
4558 return -EAGAIN; in btrfs_use_block_group_size_class()
4561 * The happy new block group case: the new allocation is the first in btrfs_use_block_group_size_class()
4564 bg->size_class = size_class; in btrfs_use_block_group_size_class()
4571 if (btrfs_is_zoned(bg->fs_info)) in btrfs_block_group_should_use_size_class()