fs/btrfs/zoned.c: excerpted lines matching "i", "cache", "block", "size"
1 // SPDX-License-Identifier: GPL-2.0
12 #include "rcu-string.h"
13 #include "disk-io.h"
14 #include "block-group.h"
16 #include "dev-replace.h"
17 #include "space-info.h"
26 #define WP_MISSING_DEV ((u64)-1)
28 #define WP_CONVENTIONAL ((u64)-2)
33 * - primary superblock: 0B (zone 0)
34 * - first copy: 512G (zone starting at that offset)
35 * - second copy: 4T (zone starting at that offset)
50 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
51 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
52 * - 1 zone for tree-log dedicated block group
53 * - 1 zone for relocation
58 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
59 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
60 * We do not expect the zone size to become larger than 8GiB or smaller than
73 return (zone->cond == BLK_ZONE_COND_FULL) || in sb_zone_is_full()
74 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity); in sb_zone_is_full()
92 int i; in sb_write_pointer() local
94 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { in sb_write_pointer()
95 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL); in sb_write_pointer()
96 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY); in sb_write_pointer()
97 full[i] = sb_zone_is_full(&zones[i]); in sb_write_pointer()
120 return -ENOENT; in sb_write_pointer()
123 struct address_space *mapping = bdev->bd_inode->i_mapping; in sb_write_pointer()
126 int i; in sb_write_pointer() local
128 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { in sb_write_pointer()
129 u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT; in sb_write_pointer()
130 u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) - in sb_write_pointer()
133 page[i] = read_cache_page_gfp(mapping, in sb_write_pointer()
135 if (IS_ERR(page[i])) { in sb_write_pointer()
136 if (i == 1) in sb_write_pointer()
138 return PTR_ERR(page[i]); in sb_write_pointer()
140 super[i] = page_address(page[i]); in sb_write_pointer()
149 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) in sb_write_pointer()
150 btrfs_release_disk_super(super[i]); in sb_write_pointer()
156 return -EUCLEAN; in sb_write_pointer()
172 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break; in sb_zone_number()
173 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break; in sb_zone_number()
190 return (u64)zone_number << zone_info->zone_size_shift; in zone_start_physical()
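To make the shift arithmetic in sb_zone_number() and zone_start_physical() concrete, here is a minimal userspace sketch (not kernel code). It assumes the usual constant values BTRFS_SB_LOG_FIRST_SHIFT = 39 (512G) and BTRFS_SB_LOG_SECOND_SHIFT = 42 (4T), and prints where the three superblock log locations land on a device with 256MiB zones, matching the fixed locations listed in the comment near the top of the file.

#include <stdio.h>
#include <stdint.h>

/* Assumed values: ilog2(512G) and ilog2(4T); illustrative only. */
#define SB_LOG_FIRST_SHIFT	39
#define SB_LOG_SECOND_SHIFT	42

static uint64_t sb_zone_number(int shift, int mirror)
{
	switch (mirror) {
	case 0: return 0;
	case 1: return 1ULL << (SB_LOG_FIRST_SHIFT - shift);
	case 2: return 1ULL << (SB_LOG_SECOND_SHIFT - shift);
	}
	return 0;
}

int main(void)
{
	const int shift = 28;	/* 256MiB zones: ilog2(256 * 1024 * 1024) */

	for (int mirror = 0; mirror < 3; mirror++) {
		uint64_t zone = sb_zone_number(shift, mirror);

		/* zone_start_physical(): zone number shifted back up to bytes. */
		printf("mirror %d: zone %llu, start %llu GiB\n", mirror,
		       (unsigned long long)zone,
		       (unsigned long long)((zone << shift) >> 30));
	}
	return 0;	/* prints zones 0, 2048, 16384 -> 0, 512 and 4096 GiB */
}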
194 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
201 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT; in emulate_report_zones()
202 sector_t bdev_size = bdev_nr_sectors(device->bdev); in emulate_report_zones()
203 unsigned int i; in emulate_report_zones() local
206 for (i = 0; i < nr_zones; i++) { in emulate_report_zones()
207 zones[i].start = i * zone_sectors + pos; in emulate_report_zones()
208 zones[i].len = zone_sectors; in emulate_report_zones()
209 zones[i].capacity = zone_sectors; in emulate_report_zones()
210 zones[i].wp = zones[i].start + zone_sectors; in emulate_report_zones()
211 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL; in emulate_report_zones()
212 zones[i].cond = BLK_ZONE_COND_NOT_WP; in emulate_report_zones()
214 if (zones[i].wp >= bdev_size) { in emulate_report_zones()
215 i++; in emulate_report_zones()
220 return i; in emulate_report_zones()
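The loop above is the whole emulation: a plain block device is carved into fixed-size conventional zones whose write pointer is reported at the zone end, and the report stops once it runs past the device. A minimal userspace sketch of the same slicing, with a simplified stand-in for struct blk_zone (field set and names here are illustrative, not the kernel ABI):

#include <stdio.h>
#include <stdint.h>

struct fake_zone {
	uint64_t start;	/* in 512-byte sectors */
	uint64_t len;
	uint64_t wp;
};

static unsigned int emulate_report(uint64_t dev_sectors, uint64_t zone_sectors,
				   uint64_t pos, struct fake_zone *zones,
				   unsigned int nr_zones)
{
	unsigned int i;

	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		/* Conventional zones have no write pointer; report the zone end. */
		zones[i].wp = zones[i].start + zone_sectors;
		if (zones[i].wp >= dev_sectors) {
			i++;
			break;
		}
	}
	return i;
}

int main(void)
{
	struct fake_zone zones[8];
	/* 1 GiB device with 256 MiB emulated zones -> 4 conventional zones. */
	unsigned int n = emulate_report(2097152, 524288, 0, zones, 8);

	for (unsigned int i = 0; i < n; i++)
		printf("zone %u: start %llu, len %llu sectors\n", i,
		       (unsigned long long)zones[i].start,
		       (unsigned long long)zones[i].len);
	return 0;
}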
226 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_get_dev_zones()
232 if (!bdev_is_zoned(device->bdev)) { in btrfs_get_dev_zones()
238 /* Check cache */ in btrfs_get_dev_zones()
239 if (zinfo->zone_cache) { in btrfs_get_dev_zones()
240 unsigned int i; in btrfs_get_dev_zones() local
243 ASSERT(IS_ALIGNED(pos, zinfo->zone_size)); in btrfs_get_dev_zones()
244 zno = pos >> zinfo->zone_size_shift; in btrfs_get_dev_zones()
249 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno); in btrfs_get_dev_zones()
251 for (i = 0; i < *nr_zones; i++) { in btrfs_get_dev_zones()
254 zone_info = &zinfo->zone_cache[zno + i]; in btrfs_get_dev_zones()
255 if (!zone_info->len) in btrfs_get_dev_zones()
259 if (i == *nr_zones) { in btrfs_get_dev_zones()
260 /* Cache hit on all the zones */ in btrfs_get_dev_zones()
261 memcpy(zones, zinfo->zone_cache + zno, in btrfs_get_dev_zones()
262 sizeof(*zinfo->zone_cache) * *nr_zones); in btrfs_get_dev_zones()
267 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones, in btrfs_get_dev_zones()
270 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zones()
272 pos, rcu_str_deref(device->name), in btrfs_get_dev_zones()
273 device->devid); in btrfs_get_dev_zones()
278 return -EIO; in btrfs_get_dev_zones()
280 /* Populate cache */ in btrfs_get_dev_zones()
281 if (zinfo->zone_cache) { in btrfs_get_dev_zones()
282 u32 zno = pos >> zinfo->zone_size_shift; in btrfs_get_dev_zones()
284 memcpy(zinfo->zone_cache + zno, zones, in btrfs_get_dev_zones()
285 sizeof(*zinfo->zone_cache) * *nr_zones); in btrfs_get_dev_zones()
291 /* The emulated zone size is determined from the size of device extent */
295 struct btrfs_root *root = fs_info->dev_root; in calculate_emulated_zone_size()
307 return -ENOMEM; in calculate_emulated_zone_size()
313 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in calculate_emulated_zone_size()
319 ret = -EUCLEAN; in calculate_emulated_zone_size()
324 leaf = path->nodes[0]; in calculate_emulated_zone_size()
325 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); in calculate_emulated_zone_size()
326 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext); in calculate_emulated_zone_size()
337 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_get_dev_zone_info_all_devices()
341 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */ in btrfs_get_dev_zone_info_all_devices()
345 mutex_lock(&fs_devices->device_list_mutex); in btrfs_get_dev_zone_info_all_devices()
346 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_get_dev_zone_info_all_devices()
348 if (!device->bdev) in btrfs_get_dev_zone_info_all_devices()
355 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_get_dev_zone_info_all_devices()
362 struct btrfs_fs_info *fs_info = device->fs_info; in btrfs_get_dev_zone_info()
364 struct block_device *bdev = device->bdev; in btrfs_get_dev_zone_info()
370 unsigned int i, nreported = 0, nr_zones; in btrfs_get_dev_zone_info() local
382 if (device->zone_info) in btrfs_get_dev_zone_info()
387 return -ENOMEM; in btrfs_get_dev_zone_info()
389 device->zone_info = zone_info; in btrfs_get_dev_zone_info()
392 if (!fs_info->zone_size) { in btrfs_get_dev_zone_info()
398 ASSERT(fs_info->zone_size); in btrfs_get_dev_zone_info()
399 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT; in btrfs_get_dev_zone_info()
405 zone_info->zone_size = zone_sectors << SECTOR_SHIFT; in btrfs_get_dev_zone_info()
407 /* We reject devices with a zone size larger than 8GB */ in btrfs_get_dev_zone_info()
408 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) { in btrfs_get_dev_zone_info()
410 "zoned: %s: zone size %llu larger than supported maximum %llu", in btrfs_get_dev_zone_info()
411 rcu_str_deref(device->name), in btrfs_get_dev_zone_info()
412 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE); in btrfs_get_dev_zone_info()
413 ret = -EINVAL; in btrfs_get_dev_zone_info()
415 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) { in btrfs_get_dev_zone_info()
417 "zoned: %s: zone size %llu smaller than supported minimum %u", in btrfs_get_dev_zone_info()
418 rcu_str_deref(device->name), in btrfs_get_dev_zone_info()
419 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE); in btrfs_get_dev_zone_info()
420 ret = -EINVAL; in btrfs_get_dev_zone_info()
425 zone_info->zone_size_shift = ilog2(zone_info->zone_size); in btrfs_get_dev_zone_info()
426 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors); in btrfs_get_dev_zone_info()
428 zone_info->nr_zones++; in btrfs_get_dev_zone_info()
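The zone count is simply the device size in zone-size units, rounded up so that a partial zone at the end of the device still gets a slot in the bitmaps. A tiny illustrative calculation with assumed numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t zone_sectors = 524288;	/* 256 MiB zones in 512B sectors */
	const int shift = 19;			/* ilog2(zone_sectors)           */
	/* Device that is five full zones plus a 1000-sector runt. */
	const uint64_t nr_sectors = 5 * zone_sectors + 1000;
	uint32_t nr_zones = nr_sectors >> shift;

	if (nr_sectors & (zone_sectors - 1))	/* !IS_ALIGNED(nr_sectors, zone_sectors) */
		nr_zones++;

	printf("nr_zones = %u\n", nr_zones);	/* 6: five full zones + one partial */
	return 0;
}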
434 rcu_str_deref(device->name), max_active_zones, in btrfs_get_dev_zone_info()
436 ret = -EINVAL; in btrfs_get_dev_zone_info()
439 zone_info->max_active_zones = max_active_zones; in btrfs_get_dev_zone_info()
441 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_get_dev_zone_info()
442 if (!zone_info->seq_zones) { in btrfs_get_dev_zone_info()
443 ret = -ENOMEM; in btrfs_get_dev_zone_info()
447 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_get_dev_zone_info()
448 if (!zone_info->empty_zones) { in btrfs_get_dev_zone_info()
449 ret = -ENOMEM; in btrfs_get_dev_zone_info()
453 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_get_dev_zone_info()
454 if (!zone_info->active_zones) { in btrfs_get_dev_zone_info()
455 ret = -ENOMEM; in btrfs_get_dev_zone_info()
461 ret = -ENOMEM; in btrfs_get_dev_zone_info()
466 * Enable zone cache only for a zoned device. On a non-zoned device, we in btrfs_get_dev_zone_info()
467 * fill the zone info with emulated CONVENTIONAL zones, so there is no need to in btrfs_get_dev_zone_info()
468 * use the cache. in btrfs_get_dev_zone_info()
470 if (populate_cache && bdev_is_zoned(device->bdev)) { in btrfs_get_dev_zone_info()
471 zone_info->zone_cache = vcalloc(zone_info->nr_zones, in btrfs_get_dev_zone_info()
473 if (!zone_info->zone_cache) { in btrfs_get_dev_zone_info()
474 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zone_info()
475 "zoned: failed to allocate zone cache for %s", in btrfs_get_dev_zone_info()
476 rcu_str_deref(device->name)); in btrfs_get_dev_zone_info()
477 ret = -ENOMEM; in btrfs_get_dev_zone_info()
491 for (i = 0; i < nr_zones; i++) { in btrfs_get_dev_zone_info()
492 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ) in btrfs_get_dev_zone_info()
493 __set_bit(nreported, zone_info->seq_zones); in btrfs_get_dev_zone_info()
494 switch (zones[i].cond) { in btrfs_get_dev_zone_info()
496 __set_bit(nreported, zone_info->empty_zones); in btrfs_get_dev_zone_info()
501 __set_bit(nreported, zone_info->active_zones); in btrfs_get_dev_zone_info()
507 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len; in btrfs_get_dev_zone_info()
510 if (nreported != zone_info->nr_zones) { in btrfs_get_dev_zone_info()
511 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zone_info()
513 rcu_str_deref(device->name), nreported, in btrfs_get_dev_zone_info()
514 zone_info->nr_zones); in btrfs_get_dev_zone_info()
515 ret = -EIO; in btrfs_get_dev_zone_info()
521 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zone_info()
523 nactive, rcu_str_deref(device->name), in btrfs_get_dev_zone_info()
525 ret = -EIO; in btrfs_get_dev_zone_info()
528 atomic_set(&zone_info->active_zones_left, in btrfs_get_dev_zone_info()
529 max_active_zones - nactive); in btrfs_get_dev_zone_info()
530 set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags); in btrfs_get_dev_zone_info()
535 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { in btrfs_get_dev_zone_info()
538 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i; in btrfs_get_dev_zone_info()
540 sb_zone = sb_zone_number(zone_info->zone_size_shift, i); in btrfs_get_dev_zone_info()
541 if (sb_zone + 1 >= zone_info->nr_zones) in btrfs_get_dev_zone_info()
546 &zone_info->sb_zones[sb_pos], in btrfs_get_dev_zone_info()
552 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zone_info()
553 "zoned: failed to read super block log zone info at devid %llu zone %u", in btrfs_get_dev_zone_info()
554 device->devid, sb_zone); in btrfs_get_dev_zone_info()
555 ret = -EUCLEAN; in btrfs_get_dev_zone_info()
563 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type == in btrfs_get_dev_zone_info()
567 ret = sb_write_pointer(device->bdev, in btrfs_get_dev_zone_info()
568 &zone_info->sb_zones[sb_pos], &sb_wp); in btrfs_get_dev_zone_info()
569 if (ret != -ENOENT && ret) { in btrfs_get_dev_zone_info()
570 btrfs_err_in_rcu(device->fs_info, in btrfs_get_dev_zone_info()
571 "zoned: super block log zone corrupted devid %llu zone %u", in btrfs_get_dev_zone_info()
572 device->devid, sb_zone); in btrfs_get_dev_zone_info()
573 ret = -EUCLEAN; in btrfs_get_dev_zone_info()
582 model = "host-managed zoned"; in btrfs_get_dev_zone_info()
590 "%s block device %s, %u %szones of %llu bytes", in btrfs_get_dev_zone_info()
591 model, rcu_str_deref(device->name), zone_info->nr_zones, in btrfs_get_dev_zone_info()
592 emulated, zone_info->zone_size); in btrfs_get_dev_zone_info()
604 struct btrfs_zoned_device_info *zone_info = device->zone_info; in btrfs_destroy_dev_zone_info()
609 bitmap_free(zone_info->active_zones); in btrfs_destroy_dev_zone_info()
610 bitmap_free(zone_info->seq_zones); in btrfs_destroy_dev_zone_info()
611 bitmap_free(zone_info->empty_zones); in btrfs_destroy_dev_zone_info()
612 vfree(zone_info->zone_cache); in btrfs_destroy_dev_zone_info()
614 device->zone_info = NULL; in btrfs_destroy_dev_zone_info()
621 zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL); in btrfs_clone_dev_zone_info()
625 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_clone_dev_zone_info()
626 if (!zone_info->seq_zones) in btrfs_clone_dev_zone_info()
629 bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones, in btrfs_clone_dev_zone_info()
630 zone_info->nr_zones); in btrfs_clone_dev_zone_info()
632 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_clone_dev_zone_info()
633 if (!zone_info->empty_zones) in btrfs_clone_dev_zone_info()
636 bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones, in btrfs_clone_dev_zone_info()
637 zone_info->nr_zones); in btrfs_clone_dev_zone_info()
639 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL); in btrfs_clone_dev_zone_info()
640 if (!zone_info->active_zones) in btrfs_clone_dev_zone_info()
643 bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones, in btrfs_clone_dev_zone_info()
644 zone_info->nr_zones); in btrfs_clone_dev_zone_info()
645 zone_info->zone_cache = NULL; in btrfs_clone_dev_zone_info()
650 bitmap_free(zone_info->seq_zones); in btrfs_clone_dev_zone_info()
651 bitmap_free(zone_info->empty_zones); in btrfs_clone_dev_zone_info()
652 bitmap_free(zone_info->active_zones); in btrfs_clone_dev_zone_info()
665 return ret ? ret : -EIO; in btrfs_get_dev_zone()
674 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { in btrfs_check_for_zoned_device()
675 if (device->bdev && bdev_is_zoned(device->bdev)) { in btrfs_check_for_zoned_device()
678 device->bdev); in btrfs_check_for_zoned_device()
679 return -EINVAL; in btrfs_check_for_zoned_device()
688 struct queue_limits *lim = &fs_info->limits; in btrfs_check_zoned_mode()
694 * Host-Managed devices can't be used without the ZONED flag. With the in btrfs_check_zoned_mode()
702 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { in btrfs_check_zoned_mode()
703 struct btrfs_zoned_device_info *zone_info = device->zone_info; in btrfs_check_zoned_mode()
705 if (!device->bdev) in btrfs_check_zoned_mode()
709 zone_size = zone_info->zone_size; in btrfs_check_zoned_mode()
710 } else if (zone_info->zone_size != zone_size) { in btrfs_check_zoned_mode()
712 "zoned: unequal block device zone sizes: have %llu found %llu", in btrfs_check_zoned_mode()
713 zone_info->zone_size, zone_size); in btrfs_check_zoned_mode()
714 return -EINVAL; in btrfs_check_zoned_mode()
718 * With the zoned emulation, we can have a non-zoned device in the in btrfs_check_zoned_mode()
719 * zoned mode. In this case, we don't have a valid max zone in btrfs_check_zoned_mode()
720 * append size. in btrfs_check_zoned_mode()
722 if (bdev_is_zoned(device->bdev)) { in btrfs_check_zoned_mode()
724 &bdev_get_queue(device->bdev)->limits, in btrfs_check_zoned_mode()
736 "zoned: zone size %llu not aligned to stripe %u", in btrfs_check_zoned_mode()
738 return -EINVAL; in btrfs_check_zoned_mode()
742 btrfs_err(fs_info, "zoned: mixed block groups not supported"); in btrfs_check_zoned_mode()
743 return -EINVAL; in btrfs_check_zoned_mode()
746 fs_info->zone_size = zone_size; in btrfs_check_zoned_mode()
754 fs_info->max_zone_append_size = ALIGN_DOWN( in btrfs_check_zoned_mode()
755 min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT, in btrfs_check_zoned_mode()
756 (u64)lim->max_sectors << SECTOR_SHIFT, in btrfs_check_zoned_mode()
757 (u64)lim->max_segments << PAGE_SHIFT), in btrfs_check_zoned_mode()
758 fs_info->sectorsize); in btrfs_check_zoned_mode()
759 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED; in btrfs_check_zoned_mode()
760 if (fs_info->max_zone_append_size < fs_info->max_extent_size) in btrfs_check_zoned_mode()
761 fs_info->max_extent_size = fs_info->max_zone_append_size; in btrfs_check_zoned_mode()
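The append limit chosen here is the most restrictive of the queue's zone-append, max-sectors and segment limits, rounded down to the filesystem sector size; the maximum extent size is then capped to it so that a single extent fits in one zone-append bio. A hedged userspace sketch of that min3()/ALIGN_DOWN() arithmetic, using made-up queue limits:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

static uint64_t min3_u64(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	/* Hypothetical queue limits, already converted to bytes. */
	const uint64_t max_zone_append = 1024ULL * 512;	/* 1024 sectors      */
	const uint64_t max_sectors = 2560ULL * 512;	/* 2560 sectors      */
	const uint64_t max_segments = 128ULL << 12;	/* 128 segments * 4K */
	const uint32_t sectorsize = 4096;
	uint64_t max_extent_size = 128ULL << 20;	/* 128 MiB default   */

	uint64_t append = ALIGN_DOWN(min3_u64(max_zone_append, max_sectors,
					      max_segments), sectorsize);

	if (append < max_extent_size)
		max_extent_size = append;

	printf("max_zone_append_size = %llu\n", (unsigned long long)append);
	printf("max_extent_size      = %llu\n", (unsigned long long)max_extent_size);
	return 0;
}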
764 * Check mount options here, because we might change fs_info->zoned in btrfs_check_zoned_mode()
765 * from fs_info->zone_size. in btrfs_check_zoned_mode()
767 ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt); in btrfs_check_zoned_mode()
771 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size); in btrfs_check_zoned_mode()
781 * Space cache writing is not COWed. Disable that to avoid write errors in btrfs_check_mountopts_zoned()
785 btrfs_err(info, "zoned: space cache v1 is not supported"); in btrfs_check_mountopts_zoned()
786 return -EINVAL; in btrfs_check_mountopts_zoned()
791 return -EINVAL; in btrfs_check_mountopts_zoned()
815 if (ret != -ENOENT && ret < 0) in sb_log_location()
826 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) { in sb_log_location()
830 reset->start, reset->len, in sb_log_location()
835 reset->cond = BLK_ZONE_COND_EMPTY; in sb_log_location()
836 reset->wp = reset->start; in sb_log_location()
838 } else if (ret != -ENOENT) { in sb_log_location()
853 wp -= BTRFS_SUPER_INFO_SIZE; in sb_log_location()
881 return -EINVAL; in btrfs_sb_log_location_bdev()
888 return -ENOENT; in btrfs_sb_log_location_bdev()
896 return -EIO; in btrfs_sb_log_location_bdev()
904 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_sb_log_location()
908 * For a zoned filesystem on a non-zoned block device, use the same in btrfs_sb_log_location()
909 * super block locations as a regular filesystem. Doing so, the super in btrfs_sb_log_location()
910 * block can always be retrieved and the zoned flag of the volume in btrfs_sb_log_location()
911 * detected from the super block information. in btrfs_sb_log_location()
913 if (!bdev_is_zoned(device->bdev)) { in btrfs_sb_log_location()
918 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror); in btrfs_sb_log_location()
919 if (zone_num + 1 >= zinfo->nr_zones) in btrfs_sb_log_location()
920 return -ENOENT; in btrfs_sb_log_location()
922 return sb_log_location(device->bdev, in btrfs_sb_log_location()
923 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror], in btrfs_sb_log_location()
935 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror); in is_sb_log_zone()
936 if (zone_num + 1 >= zinfo->nr_zones) in is_sb_log_zone()
939 if (!test_bit(zone_num, zinfo->seq_zones)) in is_sb_log_zone()
947 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_advance_sb_log()
949 int i; in btrfs_advance_sb_log() local
954 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror]; in btrfs_advance_sb_log()
955 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { in btrfs_advance_sb_log()
957 if (zone->cond == BLK_ZONE_COND_FULL) { in btrfs_advance_sb_log()
962 if (zone->cond == BLK_ZONE_COND_EMPTY) in btrfs_advance_sb_log()
963 zone->cond = BLK_ZONE_COND_IMP_OPEN; in btrfs_advance_sb_log()
965 zone->wp += SUPER_INFO_SECTORS; in btrfs_advance_sb_log()
976 if (zone->wp != zone->start + zone->capacity) { in btrfs_advance_sb_log()
979 ret = blkdev_zone_mgmt(device->bdev, in btrfs_advance_sb_log()
980 REQ_OP_ZONE_FINISH, zone->start, in btrfs_advance_sb_log()
981 zone->len, GFP_NOFS); in btrfs_advance_sb_log()
986 zone->wp = zone->start + zone->len; in btrfs_advance_sb_log()
987 zone->cond = BLK_ZONE_COND_FULL; in btrfs_advance_sb_log()
994 return -EIO; in btrfs_advance_sb_log()
1012 return -ENOENT; in btrfs_reset_sb_log_zones()
1024 * @num_bytes: size of wanted region
1033 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_find_allocatable_zones()
1034 const u8 shift = zinfo->zone_size_shift; in btrfs_find_allocatable_zones()
1039 int i; in btrfs_find_allocatable_zones() local
1041 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size)); in btrfs_find_allocatable_zones()
1042 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size)); in btrfs_find_allocatable_zones()
1048 if (end > zinfo->nr_zones) in btrfs_find_allocatable_zones()
1053 !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) { in btrfs_find_allocatable_zones()
1054 pos += zinfo->zone_size; in btrfs_find_allocatable_zones()
1059 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { in btrfs_find_allocatable_zones()
1063 sb_zone = sb_zone_number(shift, i); in btrfs_find_allocatable_zones()
1073 sb_pos = btrfs_sb_offset(i); in btrfs_find_allocatable_zones()
1078 zinfo->zone_size); in btrfs_find_allocatable_zones()
1091 struct btrfs_zoned_device_info *zone_info = device->zone_info; in btrfs_dev_set_active_zone()
1092 unsigned int zno = (pos >> zone_info->zone_size_shift); in btrfs_dev_set_active_zone()
1095 if (zone_info->max_active_zones == 0) in btrfs_dev_set_active_zone()
1098 if (!test_bit(zno, zone_info->active_zones)) { in btrfs_dev_set_active_zone()
1100 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0) in btrfs_dev_set_active_zone()
1102 if (test_and_set_bit(zno, zone_info->active_zones)) { in btrfs_dev_set_active_zone()
1104 atomic_inc(&zone_info->active_zones_left); in btrfs_dev_set_active_zone()
1113 struct btrfs_zoned_device_info *zone_info = device->zone_info; in btrfs_dev_clear_active_zone()
1114 unsigned int zno = (pos >> zone_info->zone_size_shift); in btrfs_dev_clear_active_zone()
1117 if (zone_info->max_active_zones == 0) in btrfs_dev_clear_active_zone()
1120 if (test_and_clear_bit(zno, zone_info->active_zones)) in btrfs_dev_clear_active_zone()
1121 atomic_inc(&zone_info->active_zones_left); in btrfs_dev_clear_active_zone()
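Together these two helpers implement a lock-free budget for the device's limited number of active zones: the per-device counter is decremented only while it is still positive, and the per-zone bit resolves the race where two callers activate the same zone at once (the loser gives the budget back). A self-contained sketch of the same pattern with C11 atomics (userspace, illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 8

static atomic_int active_zones_left = 3;	/* device-wide budget        */
static atomic_bool zone_active[NR_ZONES];	/* per-zone "already active" */

static bool set_active_zone(unsigned int zno)
{
	int left;

	if (atomic_load(&zone_active[zno]))
		return true;			/* already active, nothing to pay */

	/* Pay for the zone first, like atomic_dec_if_positive(). */
	left = atomic_load(&active_zones_left);
	do {
		if (left <= 0)
			return false;		/* no active zones left on the device */
	} while (!atomic_compare_exchange_weak(&active_zones_left, &left, left - 1));

	/* Someone else activated it meanwhile: give the budget back. */
	if (atomic_exchange(&zone_active[zno], true))
		atomic_fetch_add(&active_zones_left, 1);
	return true;
}

static void clear_active_zone(unsigned int zno)
{
	if (atomic_exchange(&zone_active[zno], false))
		atomic_fetch_add(&active_zones_left, 1);
}

int main(void)
{
	for (unsigned int z = 0; z < 5; z++)
		printf("zone %u -> %s\n", z, set_active_zone(z) ? "active" : "rejected");

	clear_active_zone(0);	/* frees one slot in the budget */
	printf("zone 4 retry -> %s\n", set_active_zone(4) ? "active" : "rejected");
	return 0;
}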
1130 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET, in btrfs_reset_device_zone()
1140 physical += device->zone_info->zone_size; in btrfs_reset_device_zone()
1141 length -= device->zone_info->zone_size; in btrfs_reset_device_zone()
1147 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) in btrfs_ensure_empty_zones() argument
1149 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_ensure_empty_zones()
1150 const u8 shift = zinfo->zone_size_shift; in btrfs_ensure_empty_zones()
1152 unsigned long nbits = size >> shift; in btrfs_ensure_empty_zones()
1156 ASSERT(IS_ALIGNED(start, zinfo->zone_size)); in btrfs_ensure_empty_zones()
1157 ASSERT(IS_ALIGNED(size, zinfo->zone_size)); in btrfs_ensure_empty_zones()
1159 if (begin + nbits > zinfo->nr_zones) in btrfs_ensure_empty_zones()
1160 return -ERANGE; in btrfs_ensure_empty_zones()
1163 if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits)) in btrfs_ensure_empty_zones()
1167 if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) && in btrfs_ensure_empty_zones()
1168 bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits)) in btrfs_ensure_empty_zones()
1171 for (pos = start; pos < start + size; pos += zinfo->zone_size) { in btrfs_ensure_empty_zones()
1180 device->fs_info, in btrfs_ensure_empty_zones()
1182 rcu_str_deref(device->name), device->devid, pos >> shift); in btrfs_ensure_empty_zones()
1185 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size, in btrfs_ensure_empty_zones()
1196 * for a block group consisting of conventional zones. It points to the
1197 * end of the highest-addressed extent in the block group as an allocation
1200 static int calculate_alloc_pointer(struct btrfs_block_group *cache, in calculate_alloc_pointer() argument
1203 struct btrfs_fs_info *fs_info = cache->fs_info; in calculate_alloc_pointer()
1212 * Avoid tree lookups for a new block group, there's no use for it. in calculate_alloc_pointer()
1215 * Also, we have a lock chain of extent buffer lock -> chunk mutex. in calculate_alloc_pointer()
1216 * For a new block group, this function is called from in calculate_alloc_pointer()
1228 return -ENOMEM; in calculate_alloc_pointer()
1230 key.objectid = cache->start + cache->length; in calculate_alloc_pointer()
1238 ret = -EUCLEAN; in calculate_alloc_pointer()
1242 ret = btrfs_previous_extent_item(root, path, cache->start); in calculate_alloc_pointer()
1251 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); in calculate_alloc_pointer()
1256 length = fs_info->nodesize; in calculate_alloc_pointer()
1258 if (!(found_key.objectid >= cache->start && in calculate_alloc_pointer()
1259 found_key.objectid + length <= cache->start + cache->length)) { in calculate_alloc_pointer()
1260 ret = -EUCLEAN; in calculate_alloc_pointer()
1263 *offset_ret = found_key.objectid + length - cache->start; in calculate_alloc_pointer()
1281 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in btrfs_load_zone_info()
1282 struct btrfs_device *device = map->stripes[zone_idx].dev; in btrfs_load_zone_info()
1288 info->physical = map->stripes[zone_idx].physical; in btrfs_load_zone_info()
1290 if (!device->bdev) { in btrfs_load_zone_info()
1291 info->alloc_offset = WP_MISSING_DEV; in btrfs_load_zone_info()
1296 if (!device->zone_info->max_active_zones) in btrfs_load_zone_info()
1299 if (!btrfs_dev_is_sequential(device, info->physical)) { in btrfs_load_zone_info()
1300 info->alloc_offset = WP_CONVENTIONAL; in btrfs_load_zone_info()
1304 /* This zone will be used for allocation, so mark this zone non-empty. */ in btrfs_load_zone_info()
1305 btrfs_dev_clear_zone_empty(device, info->physical); in btrfs_load_zone_info()
1307 down_read(&dev_replace->rwsem); in btrfs_load_zone_info()
1309 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) in btrfs_load_zone_info()
1310 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical); in btrfs_load_zone_info()
1311 up_read(&dev_replace->rwsem); in btrfs_load_zone_info()
1317 WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size)); in btrfs_load_zone_info()
1319 ret = btrfs_get_dev_zone(device, info->physical, &zone); in btrfs_load_zone_info()
1322 if (ret != -EIO && ret != -EOPNOTSUPP) in btrfs_load_zone_info()
1324 info->alloc_offset = WP_MISSING_DEV; in btrfs_load_zone_info()
1331 zone.start << SECTOR_SHIFT, rcu_str_deref(device->name), in btrfs_load_zone_info()
1332 device->devid); in btrfs_load_zone_info()
1333 return -EIO; in btrfs_load_zone_info()
1336 info->capacity = (zone.capacity << SECTOR_SHIFT); in btrfs_load_zone_info()
1343 (info->physical >> device->zone_info->zone_size_shift), in btrfs_load_zone_info()
1344 rcu_str_deref(device->name), device->devid); in btrfs_load_zone_info()
1345 info->alloc_offset = WP_MISSING_DEV; in btrfs_load_zone_info()
1348 info->alloc_offset = 0; in btrfs_load_zone_info()
1351 info->alloc_offset = info->capacity; in btrfs_load_zone_info()
1355 info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT); in btrfs_load_zone_info()
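The switch this fragment belongs to collapses a reported zone into an allocation offset relative to the zone start: an empty zone starts allocating at 0, a full zone at its capacity, a partially written zone at wp - start (converted from sectors to bytes), and offline or read-only zones are treated as a missing write pointer. A hedged userspace rendering of just the empty/full/partial mapping:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

enum cond { COND_EMPTY, COND_FULL, COND_OPEN };	/* simplified zone condition */

struct fake_zone {
	uint64_t start;		/* sectors */
	uint64_t capacity;	/* sectors */
	uint64_t wp;		/* sectors */
	enum cond cond;
};

static uint64_t alloc_offset(const struct fake_zone *z)
{
	switch (z->cond) {
	case COND_EMPTY:
		return 0;
	case COND_FULL:
		return z->capacity << SECTOR_SHIFT;
	default:
		/* Partially written: continue right behind the write pointer. */
		return (z->wp - z->start) << SECTOR_SHIFT;
	}
}

int main(void)
{
	struct fake_zone z = {
		.start = 524288, .capacity = 524288,
		.wp = 524288 + 2048, .cond = COND_OPEN,
	};

	/* 2048 sectors written -> allocation resumes 1 MiB into the zone. */
	printf("alloc_offset = %llu bytes\n", (unsigned long long)alloc_offset(&z));
	return 0;
}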
1367 if (info->alloc_offset == WP_MISSING_DEV) { in btrfs_load_block_group_single()
1368 btrfs_err(bg->fs_info, in btrfs_load_block_group_single()
1370 info->physical); in btrfs_load_block_group_single()
1371 return -EIO; in btrfs_load_block_group_single()
1374 bg->alloc_offset = info->alloc_offset; in btrfs_load_block_group_single()
1375 bg->zone_capacity = info->capacity; in btrfs_load_block_group_single()
1377 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); in btrfs_load_block_group_single()
1386 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_load_block_group_dup()
1388 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { in btrfs_load_block_group_dup()
1389 btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree"); in btrfs_load_block_group_dup()
1390 return -EINVAL; in btrfs_load_block_group_dup()
1394 btrfs_err(bg->fs_info, in btrfs_load_block_group_dup()
1397 return -EIO; in btrfs_load_block_group_dup()
1400 btrfs_err(bg->fs_info, in btrfs_load_block_group_dup()
1403 return -EIO; in btrfs_load_block_group_dup()
1406 btrfs_err(bg->fs_info, in btrfs_load_block_group_dup()
1408 return -EIO; in btrfs_load_block_group_dup()
1413 return -EIO; in btrfs_load_block_group_dup()
1415 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); in btrfs_load_block_group_dup()
1418 bg->alloc_offset = zone_info[0].alloc_offset; in btrfs_load_block_group_dup()
1419 bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity); in btrfs_load_block_group_dup()
1428 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_load_block_group_raid1()
1429 int i; in btrfs_load_block_group_raid1() local
1431 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { in btrfs_load_block_group_raid1()
1432 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree", in btrfs_load_block_group_raid1()
1433 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_raid1()
1434 return -EINVAL; in btrfs_load_block_group_raid1()
1437 for (i = 0; i < map->num_stripes; i++) { in btrfs_load_block_group_raid1()
1438 if (zone_info[i].alloc_offset == WP_MISSING_DEV || in btrfs_load_block_group_raid1()
1439 zone_info[i].alloc_offset == WP_CONVENTIONAL) in btrfs_load_block_group_raid1()
1442 if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) && in btrfs_load_block_group_raid1()
1446 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_raid1()
1447 return -EIO; in btrfs_load_block_group_raid1()
1449 if (test_bit(0, active) != test_bit(i, active)) { in btrfs_load_block_group_raid1()
1452 return -EIO; in btrfs_load_block_group_raid1()
1456 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); in btrfs_load_block_group_raid1()
1459 bg->zone_capacity = min_not_zero(zone_info[0].capacity, in btrfs_load_block_group_raid1()
1464 bg->alloc_offset = zone_info[0].alloc_offset; in btrfs_load_block_group_raid1()
1466 bg->alloc_offset = zone_info[i - 1].alloc_offset; in btrfs_load_block_group_raid1()
1476 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_load_block_group_raid0()
1478 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { in btrfs_load_block_group_raid0()
1479 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree", in btrfs_load_block_group_raid0()
1480 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_raid0()
1481 return -EINVAL; in btrfs_load_block_group_raid0()
1484 for (int i = 0; i < map->num_stripes; i++) { in btrfs_load_block_group_raid0() local
1485 if (zone_info[i].alloc_offset == WP_MISSING_DEV || in btrfs_load_block_group_raid0()
1486 zone_info[i].alloc_offset == WP_CONVENTIONAL) in btrfs_load_block_group_raid0()
1489 if (test_bit(0, active) != test_bit(i, active)) { in btrfs_load_block_group_raid0()
1491 return -EIO; in btrfs_load_block_group_raid0()
1494 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); in btrfs_load_block_group_raid0()
1496 bg->zone_capacity += zone_info[i].capacity; in btrfs_load_block_group_raid0()
1497 bg->alloc_offset += zone_info[i].alloc_offset; in btrfs_load_block_group_raid0()
1508 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_load_block_group_raid10()
1510 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { in btrfs_load_block_group_raid10()
1511 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree", in btrfs_load_block_group_raid10()
1512 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_raid10()
1513 return -EINVAL; in btrfs_load_block_group_raid10()
1516 for (int i = 0; i < map->num_stripes; i++) { in btrfs_load_block_group_raid10() local
1517 if (zone_info[i].alloc_offset == WP_MISSING_DEV || in btrfs_load_block_group_raid10()
1518 zone_info[i].alloc_offset == WP_CONVENTIONAL) in btrfs_load_block_group_raid10()
1521 if (test_bit(0, active) != test_bit(i, active)) { in btrfs_load_block_group_raid10()
1523 return -EIO; in btrfs_load_block_group_raid10()
1526 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags); in btrfs_load_block_group_raid10()
1529 if ((i % map->sub_stripes) == 0) { in btrfs_load_block_group_raid10()
1530 bg->zone_capacity += zone_info[i].capacity; in btrfs_load_block_group_raid10()
1531 bg->alloc_offset += zone_info[i].alloc_offset; in btrfs_load_block_group_raid10()
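For the striped profiles the per-stripe zone data is aggregated into the block group: RAID0 adds capacity and write-pointer progress from every stripe, while RAID10 counts each group of sub_stripes mirrored copies only once. A small illustrative calculation, assuming four stripes with identical zone capacity and fill level:

#include <stdio.h>
#include <stdint.h>

struct stripe_zone { uint64_t capacity, alloc_offset; };

int main(void)
{
	const struct stripe_zone zi[4] = {
		{ 268435456, 1048576 }, { 268435456, 1048576 },
		{ 268435456, 1048576 }, { 268435456, 1048576 },
	};
	const int num_stripes = 4, sub_stripes = 2;
	uint64_t r0_cap = 0, r0_alloc = 0, r10_cap = 0, r10_alloc = 0;

	for (int i = 0; i < num_stripes; i++) {
		/* RAID0: every stripe contributes its full zone. */
		r0_cap += zi[i].capacity;
		r0_alloc += zi[i].alloc_offset;

		/* RAID10: only the first stripe of each mirrored pair counts. */
		if ((i % sub_stripes) == 0) {
			r10_cap += zi[i].capacity;
			r10_alloc += zi[i].alloc_offset;
		}
	}
	printf("RAID0:  capacity %llu, alloc_offset %llu\n",
	       (unsigned long long)r0_cap, (unsigned long long)r0_alloc);
	printf("RAID10: capacity %llu, alloc_offset %llu\n",
	       (unsigned long long)r10_cap, (unsigned long long)r10_alloc);
	return 0;
}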
1538 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) in btrfs_load_block_group_zone_info() argument
1540 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_load_block_group_zone_info()
1542 u64 logical = cache->start; in btrfs_load_block_group_zone_info()
1543 u64 length = cache->length; in btrfs_load_block_group_zone_info()
1546 int i; in btrfs_load_block_group_zone_info() local
1555 if (!IS_ALIGNED(length, fs_info->zone_size)) { in btrfs_load_block_group_zone_info()
1557 "zoned: block group %llu len %llu unaligned to zone size %llu", in btrfs_load_block_group_zone_info()
1558 logical, length, fs_info->zone_size); in btrfs_load_block_group_zone_info()
1559 return -EIO; in btrfs_load_block_group_zone_info()
1564 return -EINVAL; in btrfs_load_block_group_zone_info()
1566 cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS); in btrfs_load_block_group_zone_info()
1567 if (!cache->physical_map) { in btrfs_load_block_group_zone_info()
1568 ret = -ENOMEM; in btrfs_load_block_group_zone_info()
1572 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS); in btrfs_load_block_group_zone_info()
1574 ret = -ENOMEM; in btrfs_load_block_group_zone_info()
1578 active = bitmap_zalloc(map->num_stripes, GFP_NOFS); in btrfs_load_block_group_zone_info()
1580 ret = -ENOMEM; in btrfs_load_block_group_zone_info()
1584 for (i = 0; i < map->num_stripes; i++) { in btrfs_load_block_group_zone_info()
1585 ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map); in btrfs_load_block_group_zone_info()
1589 if (zone_info[i].alloc_offset == WP_CONVENTIONAL) in btrfs_load_block_group_zone_info()
1596 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags); in btrfs_load_block_group_zone_info()
1599 /* Zone capacity is always zone size in emulation */ in btrfs_load_block_group_zone_info()
1600 cache->zone_capacity = cache->length; in btrfs_load_block_group_zone_info()
1601 ret = calculate_alloc_pointer(cache, &last_alloc, new); in btrfs_load_block_group_zone_info()
1605 cache->start); in btrfs_load_block_group_zone_info()
1607 } else if (map->num_stripes == num_conventional) { in btrfs_load_block_group_zone_info()
1608 cache->alloc_offset = last_alloc; in btrfs_load_block_group_zone_info()
1609 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags); in btrfs_load_block_group_zone_info()
1614 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { in btrfs_load_block_group_zone_info()
1616 ret = btrfs_load_block_group_single(cache, &zone_info[0], active); in btrfs_load_block_group_zone_info()
1619 ret = btrfs_load_block_group_dup(cache, map, zone_info, active); in btrfs_load_block_group_zone_info()
1624 ret = btrfs_load_block_group_raid1(cache, map, zone_info, active); in btrfs_load_block_group_zone_info()
1627 ret = btrfs_load_block_group_raid0(cache, map, zone_info, active); in btrfs_load_block_group_zone_info()
1630 ret = btrfs_load_block_group_raid10(cache, map, zone_info, active); in btrfs_load_block_group_zone_info()
1636 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_zone_info()
1637 ret = -EINVAL; in btrfs_load_block_group_zone_info()
1643 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && in btrfs_load_block_group_zone_info()
1644 (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && in btrfs_load_block_group_zone_info()
1645 !fs_info->stripe_root) { in btrfs_load_block_group_zone_info()
1646 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree", in btrfs_load_block_group_zone_info()
1647 btrfs_bg_type_to_raid_name(map->type)); in btrfs_load_block_group_zone_info()
1648 return -EINVAL; in btrfs_load_block_group_zone_info()
1651 if (cache->alloc_offset > cache->zone_capacity) { in btrfs_load_block_group_zone_info()
1653 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu", in btrfs_load_block_group_zone_info()
1654 cache->alloc_offset, cache->zone_capacity, in btrfs_load_block_group_zone_info()
1655 cache->start); in btrfs_load_block_group_zone_info()
1656 ret = -EIO; in btrfs_load_block_group_zone_info()
1660 if (!ret && num_conventional && last_alloc > cache->alloc_offset) { in btrfs_load_block_group_zone_info()
1663 logical, last_alloc, cache->alloc_offset); in btrfs_load_block_group_zone_info()
1664 ret = -EIO; in btrfs_load_block_group_zone_info()
1668 cache->meta_write_pointer = cache->alloc_offset + cache->start; in btrfs_load_block_group_zone_info()
1669 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) { in btrfs_load_block_group_zone_info()
1670 btrfs_get_block_group(cache); in btrfs_load_block_group_zone_info()
1671 spin_lock(&fs_info->zone_active_bgs_lock); in btrfs_load_block_group_zone_info()
1672 list_add_tail(&cache->active_bg_list, in btrfs_load_block_group_zone_info()
1673 &fs_info->zone_active_bgs); in btrfs_load_block_group_zone_info()
1674 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_load_block_group_zone_info()
1677 btrfs_free_chunk_map(cache->physical_map); in btrfs_load_block_group_zone_info()
1678 cache->physical_map = NULL; in btrfs_load_block_group_zone_info()
1687 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) in btrfs_calc_zone_unusable() argument
1691 if (!btrfs_is_zoned(cache->fs_info)) in btrfs_calc_zone_unusable()
1694 WARN_ON(cache->bytes_super != 0); in btrfs_calc_zone_unusable()
1695 unusable = (cache->alloc_offset - cache->used) + in btrfs_calc_zone_unusable()
1696 (cache->length - cache->zone_capacity); in btrfs_calc_zone_unusable()
1697 free = cache->zone_capacity - cache->alloc_offset; in btrfs_calc_zone_unusable()
1699 /* We only need ->free_space in ALLOC_SEQ block groups */ in btrfs_calc_zone_unusable()
1700 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_calc_zone_unusable()
1701 cache->free_space_ctl->free_space = free; in btrfs_calc_zone_unusable()
1702 cache->zone_unusable = unusable; in btrfs_calc_zone_unusable()
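In zoned mode free space only exists in front of the write pointer, so the block group is split into three regions: free = zone_capacity - alloc_offset, while space that was written and later freed (alloc_offset - used) plus the slack beyond the usable zone capacity (length - zone_capacity) stays unusable until the zones are reset. A worked example with assumed numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical zoned block group, all values in bytes. */
	const uint64_t length        = 268435456;	/* 256 MiB block group       */
	const uint64_t zone_capacity = 234881024;	/* 224 MiB usable capacity   */
	const uint64_t alloc_offset  = 134217728;	/* 128 MiB already allocated */
	const uint64_t used          = 100663296;	/*  96 MiB still referenced  */

	uint64_t unusable = (alloc_offset - used) + (length - zone_capacity);
	uint64_t free_space = zone_capacity - alloc_offset;

	printf("free     = %llu MiB\n", (unsigned long long)(free_space >> 20));	/* 96 */
	printf("unusable = %llu MiB\n", (unsigned long long)(unusable >> 20));		/* 64 */
	return 0;
}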
1707 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT); in btrfs_use_zone_append()
1708 struct btrfs_inode *inode = bbio->inode; in btrfs_use_zone_append()
1709 struct btrfs_fs_info *fs_info = bbio->fs_info; in btrfs_use_zone_append()
1710 struct btrfs_block_group *cache; in btrfs_use_zone_append() local
1716 if (!inode || !is_data_inode(&inode->vfs_inode)) in btrfs_use_zone_append()
1719 if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE) in btrfs_use_zone_append()
1725 * Furthermore, we have set aside our own block group from which only the in btrfs_use_zone_append()
1730 if (btrfs_is_data_reloc_root(inode->root)) in btrfs_use_zone_append()
1733 cache = btrfs_lookup_block_group(fs_info, start); in btrfs_use_zone_append()
1734 ASSERT(cache); in btrfs_use_zone_append()
1735 if (!cache) in btrfs_use_zone_append()
1738 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags); in btrfs_use_zone_append()
1739 btrfs_put_block_group(cache); in btrfs_use_zone_append()
1746 const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_record_physical_zoned()
1747 struct btrfs_ordered_sum *sum = bbio->sums; in btrfs_record_physical_zoned()
1749 if (physical < bbio->orig_physical) in btrfs_record_physical_zoned()
1750 sum->logical -= bbio->orig_physical - physical; in btrfs_record_physical_zoned()
1752 sum->logical += physical - bbio->orig_physical; in btrfs_record_physical_zoned()
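With zone append the device, not the submitter, picks the final on-disk location, so once the bio completes the checksum entries recorded at submit time are rebased by the difference between the actual and the originally intended physical address, exactly as the two branches above do. A sketch of that adjustment with illustrative types:

#include <stdio.h>
#include <stdint.h>

struct fake_ordered_sum { uint64_t logical; };

/* Shift the recorded logical address by the (actual - intended) physical delta. */
static void record_physical(struct fake_ordered_sum *sum, uint64_t physical,
			    uint64_t orig_physical)
{
	if (physical < orig_physical)
		sum->logical -= orig_physical - physical;
	else
		sum->logical += physical - orig_physical;
}

int main(void)
{
	struct fake_ordered_sum sum = { .logical = 1073741824 };	/* 1 GiB */

	/* The device placed the append 64 KiB past the submit-time estimate. */
	record_physical(&sum, 4295032832ULL, 4294967296ULL);
	printf("rebased logical = %llu\n", (unsigned long long)sum.logical);
	return 0;
}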
1758 struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree; in btrfs_rewrite_logical_zoned()
1761 ordered->disk_bytenr = logical; in btrfs_rewrite_logical_zoned()
1763 write_lock(&em_tree->lock); in btrfs_rewrite_logical_zoned()
1764 em = search_extent_mapping(em_tree, ordered->file_offset, in btrfs_rewrite_logical_zoned()
1765 ordered->num_bytes); in btrfs_rewrite_logical_zoned()
1766 em->block_start = logical; in btrfs_rewrite_logical_zoned()
1768 write_unlock(&em_tree->lock); in btrfs_rewrite_logical_zoned()
1776 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && in btrfs_zoned_split_ordered()
1777 split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset, in btrfs_zoned_split_ordered()
1778 ordered->num_bytes, len, logical)) in btrfs_zoned_split_ordered()
1784 new->disk_bytenr = logical; in btrfs_zoned_split_ordered()
1791 struct btrfs_inode *inode = BTRFS_I(ordered->inode); in btrfs_finish_ordered_zoned()
1792 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_finish_ordered_zoned()
1797 * A write to a pre-allocated region is for data relocation, and so in btrfs_finish_ordered_zoned()
1800 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) in btrfs_finish_ordered_zoned()
1803 ASSERT(!list_empty(&ordered->list)); in btrfs_finish_ordered_zoned()
1804 /* The ordered->list can be empty in the above pre-alloc case. */ in btrfs_finish_ordered_zoned()
1805 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list); in btrfs_finish_ordered_zoned()
1806 logical = sum->logical; in btrfs_finish_ordered_zoned()
1807 len = sum->len; in btrfs_finish_ordered_zoned()
1809 while (len < ordered->disk_num_bytes) { in btrfs_finish_ordered_zoned()
1811 if (sum->logical == logical + len) { in btrfs_finish_ordered_zoned()
1812 len += sum->len; in btrfs_finish_ordered_zoned()
1816 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); in btrfs_finish_ordered_zoned()
1820 logical = sum->logical; in btrfs_finish_ordered_zoned()
1821 len = sum->len; in btrfs_finish_ordered_zoned()
1824 if (ordered->disk_bytenr != logical) in btrfs_finish_ordered_zoned()
1829 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures in btrfs_finish_ordered_zoned()
1834 if ((inode->flags & BTRFS_INODE_NODATASUM) || in btrfs_finish_ordered_zoned()
1835 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) { in btrfs_finish_ordered_zoned()
1836 while ((sum = list_first_entry_or_null(&ordered->list, in btrfs_finish_ordered_zoned()
1838 list_del(&sum->list); in btrfs_finish_ordered_zoned()
1847 const struct writeback_control *wbc = ctx->wbc; in check_bg_is_active()
1848 struct btrfs_block_group *block_group = ctx->zoned_bg; in check_bg_is_active()
1849 struct btrfs_fs_info *fs_info = block_group->fs_info; in check_bg_is_active()
1851 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) in check_bg_is_active()
1854 if (fs_info->treelog_bg == block_group->start) { in check_bg_is_active()
1864 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */ in check_bg_is_active()
1865 lockdep_assert_held(&fs_info->zoned_meta_io_lock); in check_bg_is_active()
1872 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) { in check_bg_is_active()
1873 if (wbc->sync_mode == WB_SYNC_NONE || in check_bg_is_active()
1874 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)) in check_bg_is_active()
1878 /* Pivot active metadata/system block group. */ in check_bg_is_active()
1901 * Check if @ctx->eb is aligned to the write pointer.
1904 * 0: @ctx->eb is at the write pointer. You can write it.
1905 * -EAGAIN: There is a hole. The caller should handle the case.
1906 * -EBUSY: There is a hole, but the caller can just bail out.
1911 const struct writeback_control *wbc = ctx->wbc; in btrfs_check_meta_write_pointer()
1912 const struct extent_buffer *eb = ctx->eb; in btrfs_check_meta_write_pointer()
1913 struct btrfs_block_group *block_group = ctx->zoned_bg; in btrfs_check_meta_write_pointer()
1919 if (block_group->start > eb->start || in btrfs_check_meta_write_pointer()
1920 block_group->start + block_group->length <= eb->start) { in btrfs_check_meta_write_pointer()
1923 ctx->zoned_bg = NULL; in btrfs_check_meta_write_pointer()
1928 block_group = btrfs_lookup_block_group(fs_info, eb->start); in btrfs_check_meta_write_pointer()
1931 ctx->zoned_bg = block_group; in btrfs_check_meta_write_pointer()
1934 if (block_group->meta_write_pointer == eb->start) { in btrfs_check_meta_write_pointer()
1937 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags)) in btrfs_check_meta_write_pointer()
1940 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) in btrfs_check_meta_write_pointer()
1941 tgt = &fs_info->active_system_bg; in btrfs_check_meta_write_pointer()
1943 tgt = &fs_info->active_meta_bg; in btrfs_check_meta_write_pointer()
1949 * Since we may release fs_info->zoned_meta_io_lock, someone can already in btrfs_check_meta_write_pointer()
1952 if (block_group->meta_write_pointer > eb->start) in btrfs_check_meta_write_pointer()
1953 return -EBUSY; in btrfs_check_meta_write_pointer()
1956 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) in btrfs_check_meta_write_pointer()
1957 return -EAGAIN; in btrfs_check_meta_write_pointer()
1958 return -EBUSY; in btrfs_check_meta_write_pointer()
1964 return -EOPNOTSUPP; in btrfs_zoned_issue_zeroout()
1966 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT, in btrfs_zoned_issue_zeroout()
1977 int i, ret; in read_zone_info() local
1982 ret = -EIO; in read_zone_info()
1986 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { in read_zone_info()
1987 ret = -EINVAL; in read_zone_info()
1992 nmirrors = (int)bioc->num_stripes; in read_zone_info()
1993 for (i = 0; i < nmirrors; i++) { in read_zone_info()
1994 u64 physical = bioc->stripes[i].physical; in read_zone_info()
1995 struct btrfs_device *dev = bioc->stripes[i].dev; in read_zone_info()
1998 if (!dev->bdev) in read_zone_info()
2003 if (ret == -EIO || ret == -EOPNOTSUPP) in read_zone_info()
2015 * filling zeros between @physical_pos and the write pointer of the dev-replace
2021 struct btrfs_fs_info *fs_info = tgt_dev->fs_info; in btrfs_sync_zone_write_pointer()
2034 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT); in btrfs_sync_zone_write_pointer()
2040 return -EUCLEAN; in btrfs_sync_zone_write_pointer()
2042 length = wp - physical_pos; in btrfs_sync_zone_write_pointer()
2047 * Activate block group and underlying device zones
2049 * @block_group: the block group to activate
2055 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_zone_activate()
2059 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA); in btrfs_zone_activate()
2061 int i; in btrfs_zone_activate() local
2063 if (!btrfs_is_zoned(block_group->fs_info)) in btrfs_zone_activate()
2066 map = block_group->physical_map; in btrfs_zone_activate()
2068 spin_lock(&fs_info->zone_active_bgs_lock); in btrfs_zone_activate()
2069 spin_lock(&block_group->lock); in btrfs_zone_activate()
2070 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) { in btrfs_zone_activate()
2081 for (i = 0; i < map->num_stripes; i++) { in btrfs_zone_activate()
2085 device = map->stripes[i].dev; in btrfs_zone_activate()
2086 physical = map->stripes[i].physical; in btrfs_zone_activate()
2087 zinfo = device->zone_info; in btrfs_zone_activate()
2089 if (zinfo->max_active_zones == 0) in btrfs_zone_activate()
2093 reserved = zinfo->reserved_active_zones; in btrfs_zone_activate()
2095 * For the data block group, leave active zones for one in btrfs_zone_activate()
2096 * metadata block group and one system block group. in btrfs_zone_activate()
2098 if (atomic_read(&zinfo->active_zones_left) <= reserved) { in btrfs_zone_activate()
2109 zinfo->reserved_active_zones--; in btrfs_zone_activate()
2113 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags); in btrfs_zone_activate()
2114 spin_unlock(&block_group->lock); in btrfs_zone_activate()
2116 /* For the active block group list */ in btrfs_zone_activate()
2118 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs); in btrfs_zone_activate()
2119 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_zone_activate()
2124 spin_unlock(&block_group->lock); in btrfs_zone_activate()
2125 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_zone_activate()
2131 struct btrfs_fs_info *fs_info = block_group->fs_info; in wait_eb_writebacks()
2132 const u64 end = block_group->start + block_group->length; in wait_eb_writebacks()
2138 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, in wait_eb_writebacks()
2139 block_group->start >> fs_info->sectorsize_bits) { in wait_eb_writebacks()
2148 if (eb->start < block_group->start) in wait_eb_writebacks()
2150 if (eb->start >= end) in wait_eb_writebacks()
2163 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_zone_finish()
2165 const bool is_metadata = (block_group->flags & in do_zone_finish()
2168 int i; in do_zone_finish() local
2170 spin_lock(&block_group->lock); in do_zone_finish()
2171 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) { in do_zone_finish()
2172 spin_unlock(&block_group->lock); in do_zone_finish()
2178 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) { in do_zone_finish()
2179 spin_unlock(&block_group->lock); in do_zone_finish()
2180 return -EAGAIN; in do_zone_finish()
2184 * If we are sure that the block group is full (= no more room left for in do_zone_finish()
2185 * new allocation) and the IO for the last usable block is completed, we in do_zone_finish()
2188 * and block_group->meta_write_pointer for metadata. in do_zone_finish()
2191 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { in do_zone_finish()
2192 spin_unlock(&block_group->lock); in do_zone_finish()
2193 return -EAGAIN; in do_zone_finish()
2195 spin_unlock(&block_group->lock); in do_zone_finish()
2201 /* Ensure all writes in this block group finish */ in do_zone_finish()
2204 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start, in do_zone_finish()
2205 block_group->length); in do_zone_finish()
2210 spin_lock(&block_group->lock); in do_zone_finish()
2213 * Bail out if someone already deactivated the block group, or in do_zone_finish()
2214 * allocated space is left in the block group. in do_zone_finish()
2217 &block_group->runtime_flags)) { in do_zone_finish()
2218 spin_unlock(&block_group->lock); in do_zone_finish()
2223 if (block_group->reserved || in do_zone_finish()
2225 &block_group->runtime_flags)) { in do_zone_finish()
2226 spin_unlock(&block_group->lock); in do_zone_finish()
2228 return -EAGAIN; in do_zone_finish()
2232 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags); in do_zone_finish()
2233 block_group->alloc_offset = block_group->zone_capacity; in do_zone_finish()
2234 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) in do_zone_finish()
2235 block_group->meta_write_pointer = block_group->start + in do_zone_finish()
2236 block_group->zone_capacity; in do_zone_finish()
2237 block_group->free_space_ctl->free_space = 0; in do_zone_finish()
2240 spin_unlock(&block_group->lock); in do_zone_finish()
2242 map = block_group->physical_map; in do_zone_finish()
2243 for (i = 0; i < map->num_stripes; i++) { in do_zone_finish()
2244 struct btrfs_device *device = map->stripes[i].dev; in do_zone_finish()
2245 const u64 physical = map->stripes[i].physical; in do_zone_finish()
2246 struct btrfs_zoned_device_info *zinfo = device->zone_info; in do_zone_finish()
2248 if (zinfo->max_active_zones == 0) in do_zone_finish()
2251 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH, in do_zone_finish()
2253 zinfo->zone_size >> SECTOR_SHIFT, in do_zone_finish()
2259 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in do_zone_finish()
2260 zinfo->reserved_active_zones++; in do_zone_finish()
2267 spin_lock(&fs_info->zone_active_bgs_lock); in do_zone_finish()
2268 ASSERT(!list_empty(&block_group->active_bg_list)); in do_zone_finish()
2269 list_del_init(&block_group->active_bg_list); in do_zone_finish()
2270 spin_unlock(&fs_info->zone_active_bgs_lock); in do_zone_finish()
2275 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags); in do_zone_finish()
2282 if (!btrfs_is_zoned(block_group->fs_info)) in btrfs_zone_finish()
2290 struct btrfs_fs_info *fs_info = fs_devices->fs_info; in btrfs_can_activate_zone()
2298 mutex_lock(&fs_info->chunk_mutex); in btrfs_can_activate_zone()
2299 spin_lock(&fs_info->zone_active_bgs_lock); in btrfs_can_activate_zone()
2300 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { in btrfs_can_activate_zone()
2301 struct btrfs_zoned_device_info *zinfo = device->zone_info; in btrfs_can_activate_zone()
2304 if (!device->bdev) in btrfs_can_activate_zone()
2307 if (!zinfo->max_active_zones) { in btrfs_can_activate_zone()
2313 reserved = zinfo->reserved_active_zones; in btrfs_can_activate_zone()
2317 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved)); in btrfs_can_activate_zone()
2320 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved)); in btrfs_can_activate_zone()
2326 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_can_activate_zone()
2327 mutex_unlock(&fs_info->chunk_mutex); in btrfs_can_activate_zone()
2330 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags); in btrfs_can_activate_zone()
2347 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_zone_finish_endio()
2348 min_alloc_bytes = fs_info->sectorsize; in btrfs_zone_finish_endio()
2350 min_alloc_bytes = fs_info->nodesize; in btrfs_zone_finish_endio()
2352 /* Bail out if we can allocate more data from this block group. */ in btrfs_zone_finish_endio()
2354 block_group->start + block_group->zone_capacity) in btrfs_zone_finish_endio()
2368 wait_on_extent_buffer_writeback(bg->last_eb); in btrfs_zone_finish_endio_workfn()
2369 free_extent_buffer(bg->last_eb); in btrfs_zone_finish_endio_workfn()
2370 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length); in btrfs_zone_finish_endio_workfn()
2377 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) || in btrfs_schedule_zone_finish_bg()
2378 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity) in btrfs_schedule_zone_finish_bg()
2381 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) { in btrfs_schedule_zone_finish_bg()
2382 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing", in btrfs_schedule_zone_finish_bg()
2383 bg->start); in btrfs_schedule_zone_finish_bg()
2389 atomic_inc(&eb->refs); in btrfs_schedule_zone_finish_bg()
2390 bg->last_eb = eb; in btrfs_schedule_zone_finish_bg()
2391 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn); in btrfs_schedule_zone_finish_bg()
2392 queue_work(system_unbound_wq, &bg->zone_finish_work); in btrfs_schedule_zone_finish_bg()
2397 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_clear_data_reloc_bg()
2399 spin_lock(&fs_info->relocation_bg_lock); in btrfs_clear_data_reloc_bg()
2400 if (fs_info->data_reloc_bg == bg->start) in btrfs_clear_data_reloc_bg()
2401 fs_info->data_reloc_bg = 0; in btrfs_clear_data_reloc_bg()
2402 spin_unlock(&fs_info->relocation_bg_lock); in btrfs_clear_data_reloc_bg()
2407 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_free_zone_cache()
2413 mutex_lock(&fs_devices->device_list_mutex); in btrfs_free_zone_cache()
2414 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_free_zone_cache()
2415 if (device->zone_info) { in btrfs_free_zone_cache()
2416 vfree(device->zone_info->zone_cache); in btrfs_free_zone_cache()
2417 device->zone_info->zone_cache = NULL; in btrfs_free_zone_cache()
2420 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_free_zone_cache()
2425 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_zoned_should_reclaim()
2433 if (fs_info->bg_reclaim_threshold == 0) in btrfs_zoned_should_reclaim()
2436 mutex_lock(&fs_devices->device_list_mutex); in btrfs_zoned_should_reclaim()
2437 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_zoned_should_reclaim()
2438 if (!device->bdev) in btrfs_zoned_should_reclaim()
2441 total += device->disk_total_bytes; in btrfs_zoned_should_reclaim()
2442 used += device->bytes_used; in btrfs_zoned_should_reclaim()
2444 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_zoned_should_reclaim()
2447 return factor >= fs_info->bg_reclaim_threshold; in btrfs_zoned_should_reclaim()
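The reclaim decision compares the share of the zoned devices' total capacity that has already been handed out against bg_reclaim_threshold (a value of 0 disables automatic reclaim, as checked earlier in the function). A quick illustrative calculation:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
	/* Hypothetical totals summed over all devices, in bytes. */
	const uint64_t total = 2ULL << 40;		/* 2 TiB disk_total_bytes */
	const uint64_t used  = 1650ULL << 30;		/* ~1.61 TiB bytes_used   */
	const unsigned int bg_reclaim_threshold = 75;	/* percent                */

	uint64_t factor = used * 100 / total;		/* div_u64(used * 100, total) */
	bool should_reclaim = bg_reclaim_threshold &&
			      factor >= bg_reclaim_threshold;

	printf("factor = %llu%%, should_reclaim = %s\n",
	       (unsigned long long)factor, should_reclaim ? "yes" : "no");
	return 0;
}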
2459 /* It should be called on a previous data relocation block group. */ in btrfs_zoned_release_data_reloc_bg()
2460 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)); in btrfs_zoned_release_data_reloc_bg()
2462 spin_lock(&block_group->lock); in btrfs_zoned_release_data_reloc_bg()
2463 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) in btrfs_zoned_release_data_reloc_bg()
2467 if (block_group->start + block_group->alloc_offset == logical + length) { in btrfs_zoned_release_data_reloc_bg()
2469 * Now, release this block group for further allocations and in btrfs_zoned_release_data_reloc_bg()
2473 &block_group->runtime_flags); in btrfs_zoned_release_data_reloc_bg()
2477 spin_unlock(&block_group->lock); in btrfs_zoned_release_data_reloc_bg()
2488 spin_lock(&fs_info->zone_active_bgs_lock); in btrfs_zone_finish_one_bg()
2489 list_for_each_entry(block_group, &fs_info->zone_active_bgs, in btrfs_zone_finish_one_bg()
2493 spin_lock(&block_group->lock); in btrfs_zone_finish_one_bg()
2494 if (block_group->reserved || block_group->alloc_offset == 0 || in btrfs_zone_finish_one_bg()
2495 (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) || in btrfs_zone_finish_one_bg()
2496 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { in btrfs_zone_finish_one_bg()
2497 spin_unlock(&block_group->lock); in btrfs_zone_finish_one_bg()
2501 avail = block_group->zone_capacity - block_group->alloc_offset; in btrfs_zone_finish_one_bg()
2509 spin_unlock(&block_group->lock); in btrfs_zone_finish_one_bg()
2511 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_zone_finish_one_bg()
2529 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA)) in btrfs_zoned_activate_one_bg()
2536 down_read(&space_info->groups_sem); in btrfs_zoned_activate_one_bg()
2538 list_for_each_entry(bg, &space_info->block_groups[index], in btrfs_zoned_activate_one_bg()
2540 if (!spin_trylock(&bg->lock)) in btrfs_zoned_activate_one_bg()
2544 &bg->runtime_flags)) { in btrfs_zoned_activate_one_bg()
2545 spin_unlock(&bg->lock); in btrfs_zoned_activate_one_bg()
2548 spin_unlock(&bg->lock); in btrfs_zoned_activate_one_bg()
2551 up_read(&space_info->groups_sem); in btrfs_zoned_activate_one_bg()
2558 up_read(&space_info->groups_sem); in btrfs_zoned_activate_one_bg()
2574 * Reserve zones for one metadata block group, one tree-log block group, and one
2575 * system block group.
2579 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_check_active_zone_reservation()
2582 /* Reserve zones for normal SINGLE metadata and tree-log block group. */ in btrfs_check_active_zone_reservation()
2584 /* Reserve a zone for SINGLE system block group. */ in btrfs_check_active_zone_reservation()
2587 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags)) in btrfs_check_active_zone_reservation()
2594 if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP) in btrfs_check_active_zone_reservation()
2596 if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP) in btrfs_check_active_zone_reservation()
2600 mutex_lock(&fs_devices->device_list_mutex); in btrfs_check_active_zone_reservation()
2601 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_check_active_zone_reservation()
2602 if (!device->bdev) in btrfs_check_active_zone_reservation()
2605 device->zone_info->reserved_active_zones = in btrfs_check_active_zone_reservation()
2608 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_check_active_zone_reservation()
2610 /* Release reservation for currently active block groups. */ in btrfs_check_active_zone_reservation()
2611 spin_lock(&fs_info->zone_active_bgs_lock); in btrfs_check_active_zone_reservation()
2612 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) { in btrfs_check_active_zone_reservation()
2613 struct btrfs_chunk_map *map = block_group->physical_map; in btrfs_check_active_zone_reservation()
2615 if (!(block_group->flags & in btrfs_check_active_zone_reservation()
2619 for (int i = 0; i < map->num_stripes; i++) in btrfs_check_active_zone_reservation() local
2620 map->stripes[i].dev->zone_info->reserved_active_zones--; in btrfs_check_active_zone_reservation()
2622 spin_unlock(&fs_info->zone_active_bgs_lock); in btrfs_check_active_zone_reservation()