Commit 7dc66abb authored by Filipe Manana, committed by David Sterba

btrfs: use a dedicated data structure for chunk maps

Currently we abuse the extent_map structure for two purposes:

1) To actually represent extents for inodes;
2) To represent chunk mappings.

This is odd and has several disadvantages:

1) To create a chunk map, we need to do two memory allocations: one for
   an extent_map structure and another one for a map_lookup structure, so
   there is more potential for an allocation failure and more complicated
   code to manage and link the two structures (see the sketch after this
   list);

2) For a chunk map we actually only use 3 fields (24 bytes) of the
   respective extent map structure: the 'start' field to have the logical
   start address of the chunk, the 'len' field to have the chunk's size,
   and the 'orig_block_len' field to contain the chunk's stripe size.

   Besides wasting memory, it's also odd and not intuitive at all to
   have the stripe size in a field named 'orig_block_len'.

   We are also using 'block_len' of the extent_map structure to contain
   the chunk size, so we have 2 fields for the same value, 'len' and
   'block_len', which is pointless;

3) When an extent map is associated with a chunk mapping, we set the bit
   EXTENT_FLAG_FS_MAPPING in its flags and then make its member named
   'map_lookup' point to the associated map_lookup structure. This means
   that for an extent map associated with an inode extent, we are not
   using this 'map_lookup' pointer, so wasting 8 bytes (on a 64-bit
   platform);

4) Extent maps associated with a chunk mapping are never merged or split,
   so it's pointless to use the existing extent map infrastructure for
   them.
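
For illustration, here is the old two-allocation pattern, distilled from
the test code removed later in this patch (a sketch only; 'num_stripes'
stands in for the caller's stripe count):

    struct extent_map *em;
    struct map_lookup *map;

    em = alloc_extent_map();                /* allocation 1 */
    if (!em)
            return -ENOMEM;

    map = kmalloc(map_lookup_size(num_stripes), GFP_KERNEL);  /* allocation 2 */
    if (!map) {
            free_extent_map(em);
            return -ENOMEM;
    }

    /* Mark the extent map as a chunk mapping and link the two structures. */
    set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
    em->map_lookup = map;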

So add a dedicated data structure named 'btrfs_chunk_map' to represent
chunk mappings; it is basically the existing map_lookup structure with
some extra fields (a short usage sketch follows the list):

1) 'start' to contain the chunk logical address;
2) 'chunk_len' to contain the chunk's length;
3) 'stripe_size' for the stripe size;
4) 'rb_node' for insertion into a rb tree;
5) 'refs' for reference counting.
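
A minimal sketch of the resulting caller-side lifetime, using the lookup
and release helpers added by this patch:

    struct btrfs_chunk_map *map;

    map = btrfs_get_chunk_map(fs_info, logical, length);
    if (IS_ERR(map))
            return PTR_ERR(map);

    /* Use map->stripes[i], map->chunk_len, map->stripe_size, ... */

    btrfs_free_chunk_map(map);  /* drops our reference, frees on the last one */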

This way we do a single memory allocation for chunk mappings and we don't
waste memory for them with unused/unnecessary fields from an extent_map.

We also save 8 bytes from the extent_map structure by removing the
'map_lookup' pointer, so the size of struct extent_map is reduced from
144 bytes down to 136 bytes, and we can now have 30 extent maps per 4K
page instead of 28.
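
(The per-page figures are plain floor division: 4096 / 136 = 30 extent
maps per 4K page after this change, versus 4096 / 144 = 28 before.)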
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ebb0beca
@@ -5,6 +5,8 @@
 #include "free-space-cache.h"
 
+struct btrfs_chunk_map;
+
 enum btrfs_disk_cache_state {
 	BTRFS_DC_WRITTEN,
 	BTRFS_DC_ERROR,
@@ -243,7 +245,7 @@ struct btrfs_block_group {
 	u64 zone_unusable;
 	u64 zone_capacity;
 	u64 meta_write_pointer;
-	struct map_lookup *physical_map;
+	struct btrfs_chunk_map *physical_map;
 	struct list_head active_bg_list;
 	struct work_struct zone_finish_work;
 	struct extent_buffer *last_eb;
@@ -297,7 +299,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 			struct btrfs_fs_info *fs_info,
 			const u64 chunk_offset);
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     u64 group_start, struct extent_map *em);
+			     struct btrfs_chunk_map *map);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
 void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
 void btrfs_reclaim_bgs_work(struct work_struct *work);
......
@@ -550,8 +550,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 					u64 physical)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct extent_map *em;
-	struct map_lookup *map;
+	struct btrfs_chunk_map *map;
 	u64 chunk_offset = cache->start;
 	int num_extents, cur_extent;
 	int i;
@@ -567,9 +566,8 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 	}
 	spin_unlock(&cache->lock);
 
-	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
-	ASSERT(!IS_ERR(em));
-	map = em->map_lookup;
+	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
+	ASSERT(!IS_ERR(map));
 
 	num_extents = 0;
 	cur_extent = 0;
@@ -583,7 +581,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 			cur_extent = i;
 	}
 
-	free_extent_map(em);
+	btrfs_free_chunk_map(map);
 
 	if (num_extents > 1 && cur_extent < num_extents - 1) {
 		/*
@@ -812,25 +810,23 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
 		struct btrfs_device *srcdev,
 		struct btrfs_device *tgtdev)
 {
-	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
-	struct extent_map *em;
-	struct map_lookup *map;
 	u64 start = 0;
 	int i;
 
-	write_lock(&em_tree->lock);
+	write_lock(&fs_info->mapping_tree_lock);
 	do {
-		em = lookup_extent_mapping(em_tree, start, (u64)-1);
-		if (!em)
+		struct btrfs_chunk_map *map;
+
+		map = btrfs_find_chunk_map_nolock(fs_info, start, U64_MAX);
+		if (!map)
 			break;
-		map = em->map_lookup;
 		for (i = 0; i < map->num_stripes; i++)
 			if (srcdev == map->stripes[i].dev)
 				map->stripes[i].dev = tgtdev;
-		start = em->start + em->len;
-		free_extent_map(em);
+		start = map->start + map->chunk_len;
+		btrfs_free_chunk_map(map);
 	} while (start);
-	write_unlock(&em_tree->lock);
+	write_unlock(&fs_info->mapping_tree_lock);
 }
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
......
@@ -2720,7 +2720,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
 	spin_lock_init(&fs_info->eb_leak_lock);
 #endif
-	extent_map_tree_init(&fs_info->mapping_tree);
+	fs_info->mapping_tree = RB_ROOT_CACHED;
+	rwlock_init(&fs_info->mapping_tree_lock);
 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
 			     BTRFS_BLOCK_RSV_GLOBAL);
 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
@@ -3604,7 +3605,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
 	btrfs_stop_all_workers(fs_info);
 	btrfs_free_block_groups(fs_info);
 fail_alloc:
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+	btrfs_mapping_tree_free(fs_info);
 
 	iput(fs_info->btree_inode);
 fail:
@@ -4387,7 +4388,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	iput(fs_info->btree_inode);
 
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+	btrfs_mapping_tree_free(fs_info);
 	btrfs_close_devices(fs_info->fs_devices);
 }
......
@@ -67,8 +67,6 @@ void free_extent_map(struct extent_map *em)
 	if (refcount_dec_and_test(&em->refs)) {
 		WARN_ON(extent_map_in_tree(em));
 		WARN_ON(!list_empty(&em->list));
-		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
-			kfree(em->map_lookup);
 		kmem_cache_free(extent_map_cache, em);
 	}
 }
@@ -217,13 +215,8 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
 	       prev->block_start != EXTENT_MAP_DELALLOC);
 
-	if (prev->map_lookup || next->map_lookup)
-		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
-		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
-
 	if (extent_map_end(prev) == next->start &&
 	    prev->flags == next->flags &&
-	    prev->map_lookup == next->map_lookup &&
 	    ((next->block_start == EXTENT_MAP_HOLE &&
 	      prev->block_start == EXTENT_MAP_HOLE) ||
 	     (next->block_start == EXTENT_MAP_INLINE &&
@@ -361,39 +354,6 @@ static inline void setup_extent_mapping(struct extent_map_tree *tree,
 	try_merge_map(tree, em);
 }
 
-static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
-{
-	struct map_lookup *map = em->map_lookup;
-	u64 stripe_size = em->orig_block_len;
-	int i;
-
-	for (i = 0; i < map->num_stripes; i++) {
-		struct btrfs_io_stripe *stripe = &map->stripes[i];
-		struct btrfs_device *device = stripe->dev;
-
-		set_extent_bit(&device->alloc_state, stripe->physical,
-			       stripe->physical + stripe_size - 1,
-			       bits | EXTENT_NOWAIT, NULL);
-	}
-}
-
-static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
-{
-	struct map_lookup *map = em->map_lookup;
-	u64 stripe_size = em->orig_block_len;
-	int i;
-
-	for (i = 0; i < map->num_stripes; i++) {
-		struct btrfs_io_stripe *stripe = &map->stripes[i];
-		struct btrfs_device *device = stripe->dev;
-
-		__clear_extent_bit(&device->alloc_state, stripe->physical,
-				   stripe->physical + stripe_size - 1,
-				   bits | EXTENT_NOWAIT,
-				   NULL, NULL);
-	}
-}
-
 /*
  * Add new extent map to the extent tree
  *
@@ -419,10 +379,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
 		goto out;
 
 	setup_extent_mapping(tree, em, modified);
-	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
-		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
-		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
-	}
 out:
 	return ret;
 }
@@ -506,8 +462,6 @@ void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 	rb_erase_cached(&em->rb_node, &tree->map);
 	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
 		list_del_init(&em->list);
-	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
-		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
 	RB_CLEAR_NODE(&em->rb_node);
 }
......
@@ -23,8 +23,6 @@ enum {
 	EXTENT_FLAG_LOGGING,
 	/* Filling in a preallocated extent */
 	EXTENT_FLAG_FILLING,
-	/* filesystem extent mapping type */
-	EXTENT_FLAG_FS_MAPPING,
 	/* This em is merged from two or more physically adjacent ems */
 	EXTENT_FLAG_MERGED,
 };
@@ -50,8 +48,6 @@ struct extent_map {
 	 */
 	u64 generation;
 	unsigned long flags;
-	/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
-	struct map_lookup *map_lookup;
 	refcount_t refs;
 	unsigned int compress_type;
 	struct list_head list;
......
@@ -398,7 +398,8 @@ struct btrfs_fs_info {
 	struct extent_io_tree excluded_extents;
 
 	/* logical->physical extent mapping */
-	struct extent_map_tree mapping_tree;
+	struct rb_root_cached mapping_tree;
+	rwlock_t mapping_tree_lock;
 
 	/*
 	 * Block reservation for extent, checksum, root tree and delayed dir
......
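The mapping tree is now a plain rb_root_cached protected by its own
rwlock instead of an extent_map_tree with an embedded lock. A minimal
sketch of the locking convention this implies, assuming the behavior of
the lookup helpers whose bodies live in volumes.c (not shown in this
excerpt):

    struct btrfs_chunk_map *map;

    /* Plain lookup: takes mapping_tree_lock for reading internally. */
    map = btrfs_find_chunk_map(fs_info, logical, length);

    /*
     * The _nolock variant expects the caller to already hold the lock,
     * as the dev-replace hunk above does under write_lock().
     */
    write_lock(&fs_info->mapping_tree_lock);
    map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
    write_unlock(&fs_info->mapping_tree_lock);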
@@ -10565,6 +10565,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_state *cached_state = NULL;
 	struct extent_map *em = NULL;
+	struct btrfs_chunk_map *map = NULL;
 	struct btrfs_device *device = NULL;
 	struct btrfs_swap_info bsi = {
 		.lowest_ppage = (sector_t)-1ULL,
@@ -10704,13 +10705,13 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 			goto out;
 		}
 
-		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
-		if (IS_ERR(em)) {
-			ret = PTR_ERR(em);
+		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
 			goto out;
 		}
 
-		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
 			btrfs_warn(fs_info,
 				   "swapfile must have single data profile");
 			ret = -EINVAL;
@@ -10718,23 +10719,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 		}
 
 		if (device == NULL) {
-			device = em->map_lookup->stripes[0].dev;
+			device = map->stripes[0].dev;
 			ret = btrfs_add_swapfile_pin(inode, device, false);
 			if (ret == 1)
 				ret = 0;
 			else if (ret)
 				goto out;
-		} else if (device != em->map_lookup->stripes[0].dev) {
+		} else if (device != map->stripes[0].dev) {
 			btrfs_warn(fs_info, "swapfile must be on one device");
 			ret = -EINVAL;
 			goto out;
 		}
 
-		physical_block_start = (em->map_lookup->stripes[0].physical +
-					(logical_block_start - em->start));
-		len = min(len, em->len - (logical_block_start - em->start));
-		free_extent_map(em);
-		em = NULL;
+		physical_block_start = (map->stripes[0].physical +
+					(logical_block_start - map->start));
+		len = min(len, map->chunk_len - (logical_block_start - map->start));
+		btrfs_free_chunk_map(map);
+		map = NULL;
 
 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
 		if (!bg) {
@@ -10787,6 +10788,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 out:
 	if (!IS_ERR_OR_NULL(em))
 		free_extent_map(em);
+	if (!IS_ERR_OR_NULL(map))
+		btrfs_free_chunk_map(map);
 
 	unlock_extent(io_tree, 0, isize - 1, &cached_state);
@@ -164,7 +164,7 @@ struct raid56_bio_trace_info {
 	u8 stripe_nr;
 };
 
-static inline int nr_data_stripes(const struct map_lookup *map)
+static inline int nr_data_stripes(const struct btrfs_chunk_map *map)
 {
 	return map->num_stripes - btrfs_nr_parity_stripes(map->type);
 }
......
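As a quick check of nr_data_stripes(): btrfs_nr_parity_stripes() returns
1 for RAID5 and 2 for RAID6, so a six-stripe RAID6 chunk has 6 - 2 = 4
data stripes.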
@@ -1279,7 +1279,7 @@ static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
  * return 0 if it is a data stripe, 1 means parity stripe.
  */
 static int get_raid56_logic_offset(u64 physical, int num,
-				   struct map_lookup *map, u64 *offset,
+				   struct btrfs_chunk_map *map, u64 *offset,
 				   u64 *stripe_start)
 {
 	int i;
@@ -1894,7 +1894,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
 				      struct btrfs_device *scrub_dev,
 				      struct btrfs_block_group *bg,
-				      struct map_lookup *map,
+				      struct btrfs_chunk_map *map,
 				      u64 full_stripe_start)
 {
 	DECLARE_COMPLETION_ONSTACK(io_done);
@@ -2063,7 +2063,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
  */
 static int scrub_simple_mirror(struct scrub_ctx *sctx,
 			       struct btrfs_block_group *bg,
-			       struct map_lookup *map,
+			       struct btrfs_chunk_map *map,
 			       u64 logical_start, u64 logical_length,
 			       struct btrfs_device *device,
 			       u64 physical, int mirror_num)
@@ -2124,7 +2124,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
 }
 
 /* Calculate the full stripe length for simple stripe based profiles */
-static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
+static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
 {
 	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
 			    BTRFS_BLOCK_GROUP_RAID10));
@@ -2133,7 +2133,7 @@ static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
 }
 
 /* Get the logical bytenr for the stripe */
-static u64 simple_stripe_get_logical(struct map_lookup *map,
+static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
 				     struct btrfs_block_group *bg,
 				     int stripe_index)
 {
@@ -2150,7 +2150,7 @@ static u64 simple_stripe_get_logical(struct map_lookup *map,
 }
 
 /* Get the mirror number for the stripe */
-static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
+static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
 {
 	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
 			    BTRFS_BLOCK_GROUP_RAID10));
@@ -2162,7 +2162,7 @@ static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
 
 static int scrub_simple_stripe(struct scrub_ctx *sctx,
 			       struct btrfs_block_group *bg,
-			       struct map_lookup *map,
+			       struct btrfs_chunk_map *map,
 			       struct btrfs_device *device,
 			       int stripe_index)
 {
@@ -2195,18 +2195,17 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct btrfs_block_group *bg,
-					   struct extent_map *em,
+					   struct btrfs_chunk_map *map,
 					   struct btrfs_device *scrub_dev,
 					   int stripe_index)
 {
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
-	struct map_lookup *map = em->map_lookup;
 	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
 	const u64 chunk_logical = bg->start;
 	int ret;
 	int ret2;
 	u64 physical = map->stripes[stripe_index].physical;
-	const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
+	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
 	const u64 physical_end = physical + dev_stripe_len;
 	u64 logical;
 	u64 logic_end;
@@ -2369,17 +2368,12 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 			   u64 dev_extent_len)
 {
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
-	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
-	struct map_lookup *map;
-	struct extent_map *em;
+	struct btrfs_chunk_map *map;
 	int i;
 	int ret = 0;
 
-	read_lock(&map_tree->lock);
-	em = lookup_extent_mapping(map_tree, bg->start, bg->length);
-	read_unlock(&map_tree->lock);
-
-	if (!em) {
+	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
+	if (!map) {
 		/*
 		 * Might have been an unused block group deleted by the cleaner
 		 * kthread or relocation.
@@ -2391,22 +2385,21 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 		return ret;
 	}
-	if (em->start != bg->start)
+	if (map->start != bg->start)
 		goto out;
-	if (em->len < dev_extent_len)
+	if (map->chunk_len < dev_extent_len)
 		goto out;
 
-	map = em->map_lookup;
 	for (i = 0; i < map->num_stripes; ++i) {
 		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
 		    map->stripes[i].physical == dev_offset) {
-			ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
+			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
 			if (ret)
 				goto out;
 		}
 	}
 out:
-	free_extent_map(em);
+	btrfs_free_chunk_map(map);
 
 	return ret;
 }
......
@@ -28,6 +28,7 @@ const char *test_error[] = {
 	[TEST_ALLOC_INODE]	     = "cannot allocate inode",
 	[TEST_ALLOC_BLOCK_GROUP]     = "cannot allocate block group",
 	[TEST_ALLOC_EXTENT_MAP]      = "cannot allocate extent map",
+	[TEST_ALLOC_CHUNK_MAP]	     = "cannot allocate chunk map",
 };
 
 static const struct super_operations btrfs_test_super_ops = {
@@ -185,7 +186,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 	}
 	spin_unlock(&fs_info->buffer_lock);
 
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+	btrfs_mapping_tree_free(fs_info);
 	list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,
 				 dev_list) {
 		btrfs_free_dummy_device(dev);
......
@@ -23,6 +23,7 @@ enum {
 	TEST_ALLOC_INODE,
 	TEST_ALLOC_BLOCK_GROUP,
 	TEST_ALLOC_EXTENT_MAP,
+	TEST_ALLOC_CHUNK_MAP,
 };
 
 extern const char *test_error[];
......
@@ -859,33 +859,21 @@ struct rmap_test_vector {
 static int test_rmap_block(struct btrfs_fs_info *fs_info,
 			   struct rmap_test_vector *test)
 {
-	struct extent_map *em;
-	struct map_lookup *map = NULL;
+	struct btrfs_chunk_map *map;
 	u64 *logical = NULL;
 	int i, out_ndaddrs, out_stripe_len;
 	int ret;
 
-	em = alloc_extent_map();
-	if (!em) {
-		test_std_err(TEST_ALLOC_EXTENT_MAP);
-		return -ENOMEM;
-	}
-
-	map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL);
+	map = btrfs_alloc_chunk_map(test->num_stripes, GFP_KERNEL);
 	if (!map) {
-		kfree(em);
-		test_std_err(TEST_ALLOC_EXTENT_MAP);
+		test_std_err(TEST_ALLOC_CHUNK_MAP);
 		return -ENOMEM;
 	}
 
-	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
 	/* Start at 4GiB logical address */
-	em->start = SZ_4G;
-	em->len = test->data_stripe_size * test->num_data_stripes;
-	em->block_len = em->len;
-	em->orig_block_len = test->data_stripe_size;
-	em->map_lookup = map;
+	map->start = SZ_4G;
+	map->chunk_len = test->data_stripe_size * test->num_data_stripes;
+	map->stripe_size = test->data_stripe_size;
 	map->num_stripes = test->num_stripes;
 	map->type = test->raid_type;
@@ -901,15 +889,13 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
 		map->stripes[i].physical = test->data_stripe_phys_start[i];
 	}
 
-	write_lock(&fs_info->mapping_tree.lock);
-	ret = add_extent_mapping(&fs_info->mapping_tree, em, 0);
-	write_unlock(&fs_info->mapping_tree.lock);
+	ret = btrfs_add_chunk_map(fs_info, map);
 	if (ret) {
-		test_err("error adding block group mapping to mapping tree");
+		test_err("error adding chunk map to mapping tree");
 		goto out_free;
 	}
 
-	ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1),
+	ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
 			       &logical, &out_ndaddrs, &out_stripe_len);
 	if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
 		test_err("didn't rmap anything but expected %d",
@@ -938,14 +924,8 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
 	ret = 0;
 
 out:
-	write_lock(&fs_info->mapping_tree.lock);
-	remove_extent_mapping(&fs_info->mapping_tree, em);
-	write_unlock(&fs_info->mapping_tree.lock);
-	/* For us */
-	free_extent_map(em);
+	btrfs_remove_chunk_map(fs_info, map);
 out_free:
-	/* For the tree */
-	free_extent_map(em);
 	kfree(logical);
 	return ret;
 }
......
@@ -426,7 +426,8 @@ struct btrfs_discard_stripe {
 struct btrfs_io_context {
 	refcount_t refs;
 	struct btrfs_fs_info *fs_info;
-	u64 map_type; /* get from map_lookup->type */
+	/* Taken from struct btrfs_chunk_map::type. */
+	u64 map_type;
 	struct bio *orig_bio;
 	atomic_t error;
 	u16 max_errors;
@@ -529,18 +530,32 @@ struct btrfs_raid_attr {
 extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];
 
-struct map_lookup {
+struct btrfs_chunk_map {
+	struct rb_node rb_node;
+	/* For mount time dev extent verification. */
+	int verified_stripes;
+	refcount_t refs;
+	u64 start;
+	u64 chunk_len;
+	u64 stripe_size;
 	u64 type;
 	int io_align;
 	int io_width;
 	int num_stripes;
 	int sub_stripes;
-	int verified_stripes; /* For mount time dev extent verification */
 	struct btrfs_io_stripe stripes[];
 };
 
-#define map_lookup_size(n) (sizeof(struct map_lookup) + \
-			    (sizeof(struct btrfs_io_stripe) * (n)))
+#define btrfs_chunk_map_size(n) (sizeof(struct btrfs_chunk_map) + \
+				 (sizeof(struct btrfs_io_stripe) * (n)))
+
+static inline void btrfs_free_chunk_map(struct btrfs_chunk_map *map)
+{
+	if (map && refcount_dec_and_test(&map->refs)) {
+		ASSERT(RB_EMPTY_NODE(&map->rb_node));
+		kfree(map);
+	}
+}
 
 struct btrfs_balance_args;
 struct btrfs_balance_progress;
@@ -624,7 +639,7 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
 					    u64 type);
-void btrfs_mapping_tree_free(struct extent_map_tree *tree);
+void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		       blk_mode_t flags, void *holder);
 struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
@@ -680,13 +695,25 @@ int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
 			 u64 logical, u64 len);
 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
 				    u64 logical);
-u64 btrfs_calc_stripe_length(const struct extent_map *em);
+u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map);
 int btrfs_nr_parity_stripes(u64 type);
 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
 				     struct btrfs_block_group *bg);
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
-struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
-				       u64 logical, u64 length);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp);
+int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
+#endif
+
+struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp);
+struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
+					     u64 logical, u64 length);
+struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
+						    u64 logical, u64 length);
+struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+					    u64 logical, u64 length);
+void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
+
 void btrfs_release_disk_super(struct btrfs_super_block *super);
 
 static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
......
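A short note on the lifetime rules encoded above: btrfs_free_chunk_map()
is the single release path for every reference, including clones. A
sketch assuming btrfs_clone_chunk_map() returns an independent map with
its own reference and a cleared rb node (its body is in volumes.c, not
shown here):

    struct btrfs_chunk_map *clone;

    clone = btrfs_clone_chunk_map(map, GFP_NOFS);
    if (!clone)
            return -ENOMEM;

    /* The clone is never in the rb tree, so RB_EMPTY_NODE() holds. */
    btrfs_free_chunk_map(clone);    /* last reference, kfree()s the clone */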
@@ -1290,7 +1290,7 @@ struct zone_info {
 
 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
 				struct zone_info *info, unsigned long *active,
-				struct map_lookup *map)
+				struct btrfs_chunk_map *map)
 {
 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
 	struct btrfs_device *device = map->stripes[zone_idx].dev;
@@ -1393,7 +1393,7 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
 }
 
 static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
-				      struct map_lookup *map,
+				      struct btrfs_chunk_map *map,
 				      struct zone_info *zone_info,
 				      unsigned long *active)
 {
@@ -1435,7 +1435,7 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 }
 
 static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
-					struct map_lookup *map,
+					struct btrfs_chunk_map *map,
 					struct zone_info *zone_info,
 					unsigned long *active)
 {
@@ -1483,7 +1483,7 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
 }
 
 static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
-					struct map_lookup *map,
+					struct btrfs_chunk_map *map,
 					struct zone_info *zone_info,
 					unsigned long *active)
 {
@@ -1515,7 +1515,7 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
 }
 
 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
-					 struct map_lookup *map,
+					 struct btrfs_chunk_map *map,
 					 struct zone_info *zone_info,
 					 unsigned long *active)
 {
@@ -1552,9 +1552,7 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
-	struct extent_map *em;
-	struct map_lookup *map;
+	struct btrfs_chunk_map *map;
 	u64 logical = cache->start;
 	u64 length = cache->length;
 	struct zone_info *zone_info = NULL;
@@ -1575,17 +1573,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		return -EIO;
 	}
 
 	/* Get the chunk mapping */
-	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, logical, length);
-	read_unlock(&em_tree->lock);
-
-	if (!em)
+	map = btrfs_find_chunk_map(fs_info, logical, length);
+	if (!map)
 		return -EINVAL;
 
-	map = em->map_lookup;
-
-	cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
+	cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
 	if (!cache->physical_map) {
 		ret = -ENOMEM;
 		goto out;
@@ -1687,12 +1679,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			spin_unlock(&fs_info->zone_active_bgs_lock);
 		}
 	} else {
-		kfree(cache->physical_map);
+		btrfs_free_chunk_map(cache->physical_map);
 		cache->physical_map = NULL;
 	}
 	bitmap_free(active);
 	kfree(zone_info);
-	free_extent_map(em);
 
 	return ret;
 }
@@ -2082,7 +2073,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
 
 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 {
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	struct map_lookup *map;
+	struct btrfs_chunk_map *map;
 	struct btrfs_device *device;
 	u64 physical;
 	const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
@@ -2194,7 +2185,7 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 {
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	struct map_lookup *map;
+	struct btrfs_chunk_map *map;
 	const bool is_metadata = (block_group->flags &
 			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
 	int ret = 0;
@@ -2643,7 +2634,7 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
 	/* Release reservation for currently active block groups. */
 	spin_lock(&fs_info->zone_active_bgs_lock);
 	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
-		struct map_lookup *map = block_group->physical_map;
+		struct btrfs_chunk_map *map = block_group->physical_map;
 
 		if (!(block_group->flags &
 		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
......
@@ -21,7 +21,7 @@ struct btrfs_delayed_data_ref;
 struct btrfs_delayed_ref_head;
 struct btrfs_block_group;
 struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
 struct extent_buffer;
 struct btrfs_work;
 struct btrfs_workqueue;
@@ -277,8 +277,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 		{ (1 << EXTENT_FLAG_COMPRESSED),	"COMPRESSED"	},\
 		{ (1 << EXTENT_FLAG_PREALLOC),		"PREALLOC"	},\
 		{ (1 << EXTENT_FLAG_LOGGING),		"LOGGING"	},\
-		{ (1 << EXTENT_FLAG_FILLING),		"FILLING"	},\
-		{ (1 << EXTENT_FLAG_FS_MAPPING),	"FS_MAPPING"	})
+		{ (1 << EXTENT_FLAG_FILLING),		"FILLING"	})
 
 TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -1061,7 +1060,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
 DECLARE_EVENT_CLASS(btrfs__chunk,
 
 	TP_PROTO(const struct btrfs_fs_info *fs_info,
-		 const struct map_lookup *map, u64 offset, u64 size),
+		 const struct btrfs_chunk_map *map, u64 offset, u64 size),
 
 	TP_ARGS(fs_info, map, offset, size),
@@ -1095,7 +1094,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
 DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
 
 	TP_PROTO(const struct btrfs_fs_info *fs_info,
-		 const struct map_lookup *map, u64 offset, u64 size),
+		 const struct btrfs_chunk_map *map, u64 offset, u64 size),
 
 	TP_ARGS(fs_info, map, offset, size)
 );
@@ -1103,7 +1102,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
 DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
 
 	TP_PROTO(const struct btrfs_fs_info *fs_info,
-		 const struct map_lookup *map, u64 offset, u64 size),
+		 const struct btrfs_chunk_map *map, u64 offset, u64 size),
 
 	TP_ARGS(fs_info, map, offset, size)
 );
......