Commit 68a384b5 authored by Naohiro Aota, committed by David Sterba

btrfs: zoned: load active zone info for block group

Load the active status of the underlying zones of a block group. When the
underlying zones are active, add the block group to the fs_info->zone_active_bgs list.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent afba2bc0
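
The logic added by the diff is small but spread across the function, so a condensed illustration may help. The standalone C sketch below models it in userspace: each stripe's zone is checked for activeness, a per-stripe flag records the result (the kernel uses bitmap_zalloc()/__set_bit()/test_bit()), and a block group whose zone is active is linked onto a filesystem-wide list (the kernel does list_add_tail() on fs_info->zone_active_bgs under zone_active_bgs_lock). The struct layouts, the plain bool/linked-list bookkeeping, and the single-stripe shortcut are simplifications for illustration only, not the btrfs API.

/*
 * Minimal userspace sketch of the activeness bookkeeping added by this
 * patch.  It mirrors the logic only: the kernel uses bitmap_zalloc(),
 * __set_bit()/test_bit(), and a list protected by
 * fs_info->zone_active_bgs_lock; here plain bools and a singly linked
 * list stand in for them.  All struct and field names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_state {
	unsigned long long start;	/* zone start, bytes */
	unsigned long long wp;		/* write pointer, bytes */
	unsigned long long capacity;	/* usable bytes in the zone */
	unsigned int max_active_zones;	/* 0 == device imposes no limit */
};

struct block_group {
	int id;
	bool zone_is_active;
	struct block_group *next;	/* link for the active list */
};

/* Head of the per-filesystem list the patch appends active groups to. */
static struct block_group *zone_active_bgs;

/*
 * A zone counts as active if it is partially written, or if the device
 * has no active-zone limit at all (max_active_zones == 0), mirroring
 * the two __set_bit(i, active) sites in the diff.
 */
static bool zone_is_active(const struct zone_state *z)
{
	bool partially_used = z->wp > z->start &&
			      z->wp < z->start + z->capacity;

	return partially_used || z->max_active_zones == 0;
}

/*
 * Single-stripe case from the patch: the block group inherits the
 * activeness of stripe 0 and, if active, joins the global list
 * (the kernel takes a block group reference and a spinlock here).
 */
static void load_block_group(struct block_group *bg,
			     const struct zone_state *stripe0)
{
	bg->zone_is_active = zone_is_active(stripe0);

	if (bg->zone_is_active) {
		bg->next = zone_active_bgs;
		zone_active_bgs = bg;
	}
}

int main(void)
{
	struct zone_state z = { .start = 0, .wp = 4096,
				.capacity = 1 << 20, .max_active_zones = 8 };
	struct block_group bg = { .id = 1 };

	load_block_group(&bg, &z);
	printf("block group %d active: %s\n", bg.id,
	       bg.zone_is_active ? "yes" : "no");
	return 0;
}
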
@@ -1170,6 +1170,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	unsigned int nofs_flag;
 	u64 *alloc_offsets = NULL;
 	u64 *caps = NULL;
+	unsigned long *active = NULL;
 	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
@@ -1214,6 +1215,12 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
+	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
+	if (!active) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	for (i = 0; i < map->num_stripes; i++) {
 		bool is_sequential;
 		struct blk_zone zone;
@@ -1297,8 +1304,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			/* Partially used zone */
 			alloc_offsets[i] =
 					((zone.wp - zone.start) << SECTOR_SHIFT);
+			__set_bit(i, active);
 			break;
 		}
+
+		/*
+		 * Consider a zone as active if we can allow any number of
+		 * active zones.
+		 */
+		if (!device->zone_info->max_active_zones)
+			__set_bit(i, active);
 	}
 
 	if (num_sequential > 0)
@@ -1346,6 +1361,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = caps[0];
+		cache->zone_is_active = test_bit(0, active);
 		break;
 	case BTRFS_BLOCK_GROUP_DUP:
 	case BTRFS_BLOCK_GROUP_RAID1:
@@ -1361,6 +1377,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
+	if (cache->zone_is_active) {
+		btrfs_get_block_group(cache);
+		spin_lock(&fs_info->zone_active_bgs_lock);
+		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
+		spin_unlock(&fs_info->zone_active_bgs_lock);
+	}
+
 out:
 	if (cache->alloc_offset > fs_info->zone_size) {
 		btrfs_err(fs_info,
@@ -1392,6 +1415,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		kfree(cache->physical_map);
 		cache->physical_map = NULL;
 	}
+	bitmap_free(active);
 	kfree(caps);
 	kfree(alloc_offsets);
 	free_extent_map(em);