Commit 920e4a58 authored by Miao Xie, committed by Chris Mason

Btrfs: cleanup the redundant code for the block group allocation and init

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 26b47ff6
@@ -8364,6 +8364,41 @@ static void __link_block_group(struct btrfs_space_info *space_info,
 	up_write(&space_info->groups_sem);
 }
 
+static struct btrfs_block_group_cache *
+btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	if (!cache)
+		return NULL;
+
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return NULL;
+	}
+
+	cache->key.objectid = start;
+	cache->key.offset = size;
+	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+	cache->sectorsize = root->sectorsize;
+	cache->fs_info = root->fs_info;
+	cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       start);
+	atomic_set(&cache->count, 1);
+	spin_lock_init(&cache->lock);
+	INIT_LIST_HEAD(&cache->list);
+	INIT_LIST_HEAD(&cache->cluster_list);
+	INIT_LIST_HEAD(&cache->new_bg_list);
+
+	btrfs_init_free_space_ctl(cache);
+
+	return cache;
+}
+
 int btrfs_read_block_groups(struct btrfs_root *root)
 {
 	struct btrfs_path *path;
@@ -8399,26 +8434,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			break;
 		if (ret != 0)
 			goto error;
+
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-		cache = kzalloc(sizeof(*cache), GFP_NOFS);
+
+		cache = btrfs_create_block_group_cache(root, found_key.objectid,
+						       found_key.offset);
 		if (!cache) {
 			ret = -ENOMEM;
 			goto error;
 		}
-		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-						GFP_NOFS);
-		if (!cache->free_space_ctl) {
-			kfree(cache);
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		atomic_set(&cache->count, 1);
-		spin_lock_init(&cache->lock);
-		cache->fs_info = info;
-		INIT_LIST_HEAD(&cache->list);
-		INIT_LIST_HEAD(&cache->cluster_list);
 
 		if (need_clear) {
 			/*
@@ -8439,16 +8464,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
-		memcpy(&cache->key, &found_key, sizeof(found_key));
+		cache->flags = btrfs_block_group_flags(&cache->item);
 
 		key.objectid = found_key.objectid + found_key.offset;
 		btrfs_release_path(path);
-		cache->flags = btrfs_block_group_flags(&cache->item);
-		cache->sectorsize = root->sectorsize;
-		cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       found_key.objectid);
-		btrfs_init_free_space_ctl(cache);
 
 		/*
 		 * We need to exclude the super stripes now so that the space
@@ -8462,8 +8481,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			 * case.
 			 */
 			free_excluded_extents(root, cache);
-			kfree(cache->free_space_ctl);
-			kfree(cache);
+			btrfs_put_block_group(cache);
 			goto error;
 		}
 
@@ -8594,38 +8612,15 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	root->fs_info->last_trans_log_full_commit = trans->transid;
 
-	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
 	if (!cache)
 		return -ENOMEM;
-
-	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-					GFP_NOFS);
-	if (!cache->free_space_ctl) {
-		kfree(cache);
-		return -ENOMEM;
-	}
-
-	cache->key.objectid = chunk_offset;
-	cache->key.offset = size;
-	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-	cache->sectorsize = root->sectorsize;
-	cache->fs_info = root->fs_info;
-	cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       chunk_offset);
-
-	atomic_set(&cache->count, 1);
-	spin_lock_init(&cache->lock);
-	INIT_LIST_HEAD(&cache->list);
-	INIT_LIST_HEAD(&cache->cluster_list);
-	INIT_LIST_HEAD(&cache->new_bg_list);
-
-	btrfs_init_free_space_ctl(cache);
 
 	btrfs_set_block_group_used(&cache->item, bytes_used);
 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
-	cache->flags = type;
 	btrfs_set_block_group_flags(&cache->item, type);
+	cache->flags = type;
 
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
 	ret = exclude_super_stripes(root, cache);
@@ -8635,8 +8630,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 		 * case.
 		 */
 		free_excluded_extents(root, cache);
-		kfree(cache->free_space_ctl);
-		kfree(cache);
+		btrfs_put_block_group(cache);
 		return ret;
 	}
...
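
The effect of the patch is visible in the two call sites above: every field and list head that used to be initialized inline is now filled in by btrfs_create_block_group_cache(), and the error paths drop their hand-rolled kfree() pairs in favour of a single btrfs_put_block_group(). As a rough illustration of that ownership pattern outside the kernel, here is a minimal user-space C sketch; the struct and function names below are invented stand-ins for the example, not btrfs API.

#include <stdlib.h>

/* Stand-ins for the btrfs structures; fields trimmed to what the pattern needs. */
struct free_space_ctl { int unused; };

struct block_group_cache {
	unsigned long long start;
	unsigned long long size;
	int count;				/* simplified refcount */
	struct free_space_ctl *free_space_ctl;
};

/*
 * Analogue of btrfs_create_block_group_cache(): one helper owns every
 * allocation and every field initialization, so callers only check for NULL.
 */
static struct block_group_cache *create_block_group_cache(unsigned long long start,
							   unsigned long long size)
{
	struct block_group_cache *cache = calloc(1, sizeof(*cache));

	if (!cache)
		return NULL;

	cache->free_space_ctl = calloc(1, sizeof(*cache->free_space_ctl));
	if (!cache->free_space_ctl) {
		free(cache);		/* partial failure is cleaned up here, not by callers */
		return NULL;
	}

	cache->start = start;
	cache->size = size;
	cache->count = 1;
	return cache;
}

/* Analogue of btrfs_put_block_group(): the single release path for both allocations. */
static void put_block_group(struct block_group_cache *cache)
{
	if (--cache->count == 0) {
		free(cache->free_space_ctl);
		free(cache);
	}
}

int main(void)
{
	struct block_group_cache *cache = create_block_group_cache(0, 1024 * 1024);

	if (!cache)
		return 1;	/* the kernel callers return -ENOMEM here */

	/* ... later error handling needs only one call instead of paired kfree()s ... */
	put_block_group(cache);
	return 0;
}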