Commit 594831c4 authored by Josef Bacik's avatar Josef Bacik Committed by Chris Mason

Btrfs: fix potential race in extent buffer freeing

This sounds sort of impossible but it is the only thing I can think of and
at the very least it is theoretically possible so here it goes.

If we are in try_release_extent_buffer we will check that the ref count on
the extent buffer is 1 and not under IO, and then go down and clear the tree
ref.  If between this check and clearing the tree ref somebody else comes in
and grabs a ref on the eb and then marks it dirty before
try_release_extent_buffer() does its tree ref clear, we can end up with a
dirty eb that will be freed while it is still dirty, which will result in a
panic.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent e64860aa
...@@ -4123,11 +4123,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) ...@@ -4123,11 +4123,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* So bump the ref count first, then set the bit. If someone * So bump the ref count first, then set the bit. If someone
* beat us to it, drop the ref we added. * beat us to it, drop the ref we added.
*/ */
if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_inc(&eb->refs); atomic_inc(&eb->refs);
if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) spin_unlock(&eb->refs_lock);
atomic_dec(&eb->refs);
}
} }
static void mark_extent_buffer_accessed(struct extent_buffer *eb) static void mark_extent_buffer_accessed(struct extent_buffer *eb)
...@@ -4239,9 +4238,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, ...@@ -4239,9 +4238,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
goto free_eb; goto free_eb;
} }
/* add one reference for the tree */ /* add one reference for the tree */
spin_lock(&eb->refs_lock);
check_buffer_tree_ref(eb); check_buffer_tree_ref(eb);
spin_unlock(&eb->refs_lock);
spin_unlock(&tree->buffer_lock); spin_unlock(&tree->buffer_lock);
radix_tree_preload_end(); radix_tree_preload_end();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment