Commit 815a51c7 authored by Jan Schmidt

Btrfs: dummy extent buffers for tree mod log

The tree modification log needs two ways to create dummy extent buffers,
once by allocating a fresh one (to rebuild an old root) and once by
cloning an existing one (to make private rewind modifications to it).
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
parent 64947ec0
...@@ -3930,6 +3930,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, ...@@ -3930,6 +3930,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
eb->start = start; eb->start = start;
eb->len = len; eb->len = len;
eb->tree = tree; eb->tree = tree;
eb->bflags = 0;
rwlock_init(&eb->lock); rwlock_init(&eb->lock);
atomic_set(&eb->write_locks, 0); atomic_set(&eb->write_locks, 0);
atomic_set(&eb->read_locks, 0); atomic_set(&eb->read_locks, 0);
...@@ -3967,6 +3968,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, ...@@ -3967,6 +3968,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
return eb; return eb;
} }
/*
 * Clone an existing extent buffer into a private, unmapped copy.
 *
 * Allocates a dummy extent buffer (no backing extent_io_tree) covering the
 * same [start, start + len) range as @src, copies @src's contents into it
 * and marks the clone UPTODATE and DUMMY.  The tree mod log uses this to
 * make private rewind modifications without touching the live tree.
 *
 * Returns the new buffer, or NULL on allocation failure.  GFP_ATOMIC is
 * used, so failure is a routine event that callers must handle; we must
 * not BUG() here.
 */
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	unsigned long i;
	struct page *p;
	struct extent_buffer *new;
	unsigned long num_pages = num_extent_pages(src->start, src->len);

	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
	if (new == NULL)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		p = alloc_page(GFP_ATOMIC);
		if (!p)
			goto err;	/* atomic alloc can fail; don't BUG */
		attach_extent_buffer_page(new, p);
		WARN_ON(PageDirty(p));
		SetPageUptodate(p);
		new->pages[i] = p;
	}

	copy_extent_buffer(new, src, 0, 0, src->len);
	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);

	return new;

err:
	/*
	 * i is the index of the failed allocation; free the pages allocated
	 * so far, then the buffer itself.  These pages have no mapping, so a
	 * plain __free_page() is sufficient — NOTE(review): assumes
	 * attach_extent_buffer_page() takes no extra page reference; confirm
	 * against its definition.
	 */
	while (i--)
		__free_page(new->pages[i]);
	__free_extent_buffer(new);
	return NULL;
}
/*
 * Allocate a dummy extent buffer that is not backed by an extent_io_tree.
 *
 * The buffer covers [start, start + len); its pages are plain allocations
 * with no page->mapping, and the EXTENT_BUFFER_DUMMY bit tells the release
 * paths to skip the radix tree and mapping handling.  The tree mod log
 * uses this to rebuild old roots.
 *
 * Returns the buffer with header nritems zeroed and the uptodate bit set,
 * or NULL on allocation failure (GFP_ATOMIC may fail under pressure).
 */
struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
{
	struct extent_buffer *eb;
	unsigned long num_pages = num_extent_pages(0, len);
	unsigned long i;

	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		eb->pages[i] = alloc_page(GFP_ATOMIC);
		if (!eb->pages[i])
			goto err;
	}
	set_extent_buffer_uptodate(eb);
	btrfs_set_header_nritems(eb, 0);
	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);

	return eb;
err:
	/*
	 * i is the index of the failed allocation; free every page below it.
	 * The previous "for (i--; i > 0; i--)" loop stopped before index 0
	 * and leaked eb->pages[0].
	 */
	while (i--)
		__free_page(eb->pages[i]);
	__free_extent_buffer(eb);
	return NULL;
}
static int extent_buffer_under_io(struct extent_buffer *eb) static int extent_buffer_under_io(struct extent_buffer *eb)
{ {
return (atomic_read(&eb->io_pages) || return (atomic_read(&eb->io_pages) ||
...@@ -3982,6 +4037,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, ...@@ -3982,6 +4037,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
{ {
unsigned long index; unsigned long index;
struct page *page; struct page *page;
int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
BUG_ON(extent_buffer_under_io(eb)); BUG_ON(extent_buffer_under_io(eb));
...@@ -3992,7 +4048,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, ...@@ -3992,7 +4048,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
do { do {
index--; index--;
page = extent_buffer_page(eb, index); page = extent_buffer_page(eb, index);
if (page) { if (page && mapped) {
spin_lock(&page->mapping->private_lock); spin_lock(&page->mapping->private_lock);
/* /*
* We do this since we'll remove the pages after we've * We do this since we'll remove the pages after we've
...@@ -4017,6 +4073,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, ...@@ -4017,6 +4073,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
} }
spin_unlock(&page->mapping->private_lock); spin_unlock(&page->mapping->private_lock);
}
if (page) {
/* One for when we alloced the page */ /* One for when we alloced the page */
page_cache_release(page); page_cache_release(page);
} }
...@@ -4235,6 +4293,9 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask) ...@@ -4235,6 +4293,9 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
{ {
WARN_ON(atomic_read(&eb->refs) == 0); WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) { if (atomic_dec_and_test(&eb->refs)) {
if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
spin_unlock(&eb->refs_lock);
} else {
struct extent_io_tree *tree = eb->tree; struct extent_io_tree *tree = eb->tree;
spin_unlock(&eb->refs_lock); spin_unlock(&eb->refs_lock);
...@@ -4243,6 +4304,7 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask) ...@@ -4243,6 +4304,7 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
radix_tree_delete(&tree->buffer, radix_tree_delete(&tree->buffer,
eb->start >> PAGE_CACHE_SHIFT); eb->start >> PAGE_CACHE_SHIFT);
spin_unlock(&tree->buffer_lock); spin_unlock(&tree->buffer_lock);
}
/* Should be safe to release our pages at this point */ /* Should be safe to release our pages at this point */
btrfs_release_extent_buffer_page(eb, 0); btrfs_release_extent_buffer_page(eb, 0);
...@@ -4259,6 +4321,10 @@ void free_extent_buffer(struct extent_buffer *eb) ...@@ -4259,6 +4321,10 @@ void free_extent_buffer(struct extent_buffer *eb)
return; return;
spin_lock(&eb->refs_lock); spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
atomic_dec(&eb->refs);
if (atomic_read(&eb->refs) == 2 && if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) && !extent_buffer_under_io(eb) &&
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#define EXTENT_BUFFER_STALE 6 #define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7 #define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_IOERR 8 #define EXTENT_BUFFER_IOERR 8
#define EXTENT_BUFFER_DUMMY 9
/* these are flags for extent_clear_unlock_delalloc */ /* these are flags for extent_clear_unlock_delalloc */
#define EXTENT_CLEAR_UNLOCK_PAGE 0x1 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
...@@ -265,6 +266,8 @@ void set_page_extent_mapped(struct page *page); ...@@ -265,6 +266,8 @@ void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len); u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len); u64 start, unsigned long len);
void free_extent_buffer(struct extent_buffer *eb); void free_extent_buffer(struct extent_buffer *eb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment