Commit bdb7d303 authored by Josef Bacik, committed by Chris Mason

Btrfs: fix tree log remove space corner case

The tree log stuff can have allocated space that we end up having split
across a bitmap and a real extent.  The free space code does not deal with
this, it assumes that if it finds an extent or bitmap entry that the entire
range must fall within the entry it finds.  This isn't necessarily the case,
so rework the remove function so it can handle this case properly.  This
fixed two panics the user hit, first in the case where the space was
initially in a bitmap and then in an extent entry, and then the reverse
case.  Thanks,
Reported-and-tested-by: Shaun Reich <sreich@kde.org>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent 6bf02314
...@@ -1542,29 +1542,26 @@ static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, ...@@ -1542,29 +1542,26 @@ static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
/* /*
* XXX - this can go away after a few releases. * We need to search for bits in this bitmap. We could only cover some
* * of the extent in this bitmap thanks to how we add space, so we need
* since the only user of btrfs_remove_free_space is the tree logging * to search for as much as it as we can and clear that amount, and then
* stuff, and the only way to test that is under crash conditions, we * go searching for the next bit.
* want to have this debug stuff here just in case somethings not
* working. Search the bitmap for the space we are trying to use to
* make sure its actually there. If its not there then we need to stop
* because something has gone wrong.
*/ */
search_start = *offset; search_start = *offset;
search_bytes = *bytes; search_bytes = ctl->unit;
search_bytes = min(search_bytes, end - search_start + 1); search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
BUG_ON(ret < 0 || search_start != *offset); BUG_ON(ret < 0 || search_start != *offset);
if (*offset > bitmap_info->offset && *offset + *bytes > end) { /* We may have found more bits than what we need */
bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); search_bytes = min(search_bytes, *bytes);
*bytes -= end - *offset + 1;
*offset = end + 1; /* Cannot clear past the end of the bitmap */
} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { search_bytes = min(search_bytes, end - search_start + 1);
bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
*bytes = 0; bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
} *offset += search_bytes;
*bytes -= search_bytes;
if (*bytes) { if (*bytes) {
struct rb_node *next = rb_next(&bitmap_info->offset_index); struct rb_node *next = rb_next(&bitmap_info->offset_index);
...@@ -1595,7 +1592,7 @@ static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, ...@@ -1595,7 +1592,7 @@ static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
* everything over again. * everything over again.
*/ */
search_start = *offset; search_start = *offset;
search_bytes = *bytes; search_bytes = ctl->unit;
ret = search_bitmap(ctl, bitmap_info, &search_start, ret = search_bitmap(ctl, bitmap_info, &search_start,
&search_bytes); &search_bytes);
if (ret < 0 || search_start != *offset) if (ret < 0 || search_start != *offset)
...@@ -1878,12 +1875,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, ...@@ -1878,12 +1875,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
{ {
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info; struct btrfs_free_space *info;
struct btrfs_free_space *next_info = NULL;
int ret = 0; int ret = 0;
spin_lock(&ctl->tree_lock); spin_lock(&ctl->tree_lock);
again: again:
if (!bytes)
goto out_lock;
info = tree_search_offset(ctl, offset, 0, 0); info = tree_search_offset(ctl, offset, 0, 0);
if (!info) { if (!info) {
/* /*
...@@ -1904,88 +1903,48 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, ...@@ -1904,88 +1903,48 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
} }
} }
if (info->bytes < bytes && rb_next(&info->offset_index)) { if (!info->bitmap) {
u64 end;
next_info = rb_entry(rb_next(&info->offset_index),
struct btrfs_free_space,
offset_index);
if (next_info->bitmap)
end = next_info->offset +
BITS_PER_BITMAP * ctl->unit - 1;
else
end = next_info->offset + next_info->bytes;
if (next_info->bytes < bytes ||
next_info->offset > offset || offset > end) {
printk(KERN_CRIT "Found free space at %llu, size %llu,"
" trying to use %llu\n",
(unsigned long long)info->offset,
(unsigned long long)info->bytes,
(unsigned long long)bytes);
WARN_ON(1);
ret = -EINVAL;
goto out_lock;
}
info = next_info;
}
if (info->bytes == bytes) {
unlink_free_space(ctl, info); unlink_free_space(ctl, info);
if (info->bitmap) { if (offset == info->offset) {
kfree(info->bitmap); u64 to_free = min(bytes, info->bytes);
ctl->total_bitmaps--;
} info->bytes -= to_free;
kmem_cache_free(btrfs_free_space_cachep, info); info->offset += to_free;
ret = 0; if (info->bytes) {
goto out_lock; ret = link_free_space(ctl, info);
} WARN_ON(ret);
} else {
if (!info->bitmap && info->offset == offset) { kmem_cache_free(btrfs_free_space_cachep, info);
unlink_free_space(ctl, info); }
info->offset += bytes;
info->bytes -= bytes;
ret = link_free_space(ctl, info);
WARN_ON(ret);
goto out_lock;
}
if (!info->bitmap && info->offset <= offset && offset += to_free;
info->offset + info->bytes >= offset + bytes) { bytes -= to_free;
u64 old_start = info->offset; goto again;
/* } else {
* we're freeing space in the middle of the info, u64 old_end = info->bytes + info->offset;
* this can happen during tree log replay
*
* first unlink the old info and then
* insert it again after the hole we're creating
*/
unlink_free_space(ctl, info);
if (offset + bytes < info->offset + info->bytes) {
u64 old_end = info->offset + info->bytes;
info->offset = offset + bytes; info->bytes = offset - info->offset;
info->bytes = old_end - info->offset;
ret = link_free_space(ctl, info); ret = link_free_space(ctl, info);
WARN_ON(ret); WARN_ON(ret);
if (ret) if (ret)
goto out_lock; goto out_lock;
} else {
/* the hole we're creating ends at the end
* of the info struct, just free the info
*/
kmem_cache_free(btrfs_free_space_cachep, info);
}
spin_unlock(&ctl->tree_lock);
/* step two, insert a new info struct to cover /* Not enough bytes in this entry to satisfy us */
* anything before the hole if (old_end < offset + bytes) {
*/ bytes -= old_end - offset;
ret = btrfs_add_free_space(block_group, old_start, offset = old_end;
offset - old_start); goto again;
WARN_ON(ret); /* -ENOMEM */ } else if (old_end == offset + bytes) {
goto out; /* all done */
goto out_lock;
}
spin_unlock(&ctl->tree_lock);
ret = btrfs_add_free_space(block_group, offset + bytes,
old_end - (offset + bytes));
WARN_ON(ret);
goto out;
}
} }
ret = remove_from_bitmap(ctl, info, &offset, &bytes); ret = remove_from_bitmap(ctl, info, &offset, &bytes);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment