Commit 291d673e authored by Chris Mason

Btrfs: Do delalloc accounting via hooks in the extent_state code

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 9c58309d
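
This commit replaces open-coded delalloc byte accounting at each call site with optional set_bit/clear_bit callbacks that the extent_state code fires whenever extent bits are set or cleared. As a rough, userspace-only sketch of the bookkeeping model (none of the names below are kernel APIs; the real callbacks are btrfs_set_bit_hook and btrfs_clear_bit_hook further down in this diff):

#include <stdio.h>

/* Illustrative stand-in for fs_info->delalloc_bytes. */
static unsigned long long delalloc_bytes;

/* Fired when DELALLOC is set on the inclusive range [start, end]. */
static void demo_set_bit_hook(unsigned long long start, unsigned long long end)
{
        delalloc_bytes += end - start + 1;
}

/* Fired when DELALLOC is cleared from [start, end]. */
static void demo_clear_bit_hook(unsigned long long start, unsigned long long end)
{
        delalloc_bytes -= end - start + 1;
}

int main(void)
{
        demo_set_bit_hook(0, 4095);        /* buffered write dirties one 4K page */
        demo_set_bit_hook(4096, 8191);     /* and the next one */
        demo_clear_bit_hook(0, 8191);      /* writeback clears both pages */
        printf("outstanding delalloc: %llu bytes\n", delalloc_bytes);
        return 0;
}

Because the counter moves only where the extent-state code actually flips the bits, the callers no longer need to measure pre-existing delalloc ranges themselves.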
@@ -2180,7 +2180,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
         unsigned long last_index;
         unsigned long i;
         struct page *page;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
         struct file_ra_state *ra;
@@ -2220,11 +2219,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
                 set_extent_delalloc(io_tree, page_start,
                                     page_end, GFP_NOFS);
-                spin_lock(&root->fs_info->delalloc_lock);
-                root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
-                                                 existing_delalloc;
-                spin_unlock(&root->fs_info->delalloc_lock);
                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
                 set_page_dirty(page);
                 unlock_page(page);
...
@@ -256,6 +256,26 @@ static int merge_state(struct extent_io_tree *tree,
         return 0;
 }
 
+static void set_state_cb(struct extent_io_tree *tree,
+                         struct extent_state *state,
+                         unsigned long bits)
+{
+        if (tree->ops && tree->ops->set_bit_hook) {
+                tree->ops->set_bit_hook(tree->mapping->host, state->start,
+                                        state->end, bits);
+        }
+}
+
+static void clear_state_cb(struct extent_io_tree *tree,
+                           struct extent_state *state,
+                           unsigned long bits)
+{
+        if (tree->ops && tree->ops->clear_bit_hook) {
+                tree->ops->clear_bit_hook(tree->mapping->host, state->start,
+                                          state->end, bits);
+        }
+}
+
 /*
  * insert an extent_state struct into the tree. 'bits' are set on the
  * struct before it is inserted.
@@ -281,6 +301,7 @@ static int insert_state(struct extent_io_tree *tree,
         state->state |= bits;
         state->start = start;
         state->end = end;
+        set_state_cb(tree, state, bits);
         node = tree_insert(&tree->state, end, &state->rb_node);
         if (node) {
                 struct extent_state *found;
@@ -349,6 +370,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
                 tree->dirty_bytes -= range;
         }
         state->state &= ~bits;
+        clear_state_cb(tree, state, bits);
         if (wake)
                 wake_up(&state->wq);
         if (delete || state->state == 0) {
@@ -553,6 +575,7 @@ static void set_state_bits(struct extent_io_tree *tree,
                 tree->dirty_bytes += range;
         }
         state->state |= bits;
+        set_state_cb(tree, state, bits);
 }
 
 /*
@@ -975,6 +998,7 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree,
                         goto search_again;
                 }
                 state->state |= EXTENT_LOCKED;
+                set_state_cb(tree, state, EXTENT_LOCKED);
                 if (!found)
                         *start = state->start;
                 found++;
@@ -1474,6 +1498,7 @@ static int end_bio_extent_readpage(struct bio *bio,
                                 state = NULL;
                         }
                         clear->state |= EXTENT_UPTODATE;
+                        set_state_cb(tree, clear, EXTENT_UPTODATE);
                         clear_state_bit(tree, clear, EXTENT_LOCKED,
                                         1, 0);
                         if (cur == start)
...
@@ -33,6 +33,10 @@ struct extent_io_ops {
                               struct extent_state *state);
         void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
                                       struct extent_state *state);
+        int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
+                            unsigned long bits);
+        int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
+                              unsigned long bits);
 };
 
 struct extent_io_tree {
...
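
The two new members above are optional: extent_io.c only calls them when a filesystem fills them in, so other users of the extent code are unaffected. The registration itself is not part of this diff; assuming btrfs keeps its callbacks in a static extent_io_ops table in inode.c, wiring the hooks up would look roughly like this (member list abridged and illustrative, not taken from this commit):

/* Sketch only: how the new hooks would be registered alongside the
 * existing callbacks; only the two *_bit_hook members mirror this diff. */
static struct extent_io_ops btrfs_extent_io_ops = {
        .fill_delalloc = run_delalloc_range,
        /* ... existing readpage/writepage hooks ... */
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
};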
@@ -323,10 +323,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
                 }
                 set_extent_delalloc(io_tree, start_pos, end_of_last_block,
                                     GFP_NOFS);
-                spin_lock(&root->fs_info->delalloc_lock);
-                root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
-                                                  start_pos) - existing_delalloc;
-                spin_unlock(&root->fs_info->delalloc_lock);
                 btrfs_add_ordered_inode(inode);
         } else {
                 u64 aligned_end;
...
@@ -80,8 +80,6 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
         u64 thresh;
         int ret = 0;
-        return 0;
         if (for_del)
                 thresh = total * 90;
         else
@@ -249,7 +247,6 @@ static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
 {
         struct btrfs_root *root = BTRFS_I(inode)->root;
-        u64 num_bytes;
         int ret;
         mutex_lock(&root->fs_info->fs_mutex);
         if (btrfs_test_opt(root, NODATACOW) ||
@@ -258,20 +255,34 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
         else
                 ret = cow_file_range(inode, start, end);
-        spin_lock(&root->fs_info->delalloc_lock);
-        num_bytes = end + 1 - start;
-        if (root->fs_info->delalloc_bytes < num_bytes) {
-                printk("delalloc accounting error total %llu sub %llu\n",
-                       root->fs_info->delalloc_bytes, num_bytes);
-        } else {
-                root->fs_info->delalloc_bytes -= num_bytes;
-        }
-        spin_unlock(&root->fs_info->delalloc_lock);
         mutex_unlock(&root->fs_info->fs_mutex);
         return ret;
 }
 
+int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
+                       unsigned long bits)
+{
+        if ((bits & EXTENT_DELALLOC)) {
+                struct btrfs_root *root = BTRFS_I(inode)->root;
+                spin_lock(&root->fs_info->delalloc_lock);
+                root->fs_info->delalloc_bytes += end - start + 1;
+                spin_unlock(&root->fs_info->delalloc_lock);
+        }
+        return 0;
+}
+
+int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
+                         unsigned long bits)
+{
+        if ((bits & EXTENT_DELALLOC)) {
+                struct btrfs_root *root = BTRFS_I(inode)->root;
+                spin_lock(&root->fs_info->delalloc_lock);
+                root->fs_info->delalloc_bytes -= end - start + 1;
+                spin_unlock(&root->fs_info->delalloc_lock);
+        }
+        return 0;
+}
+
 int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
 {
         struct inode *inode = page->mapping->host;
@@ -908,28 +919,17 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
 {
         char *kaddr;
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-        u64 existing_delalloc;
-        u64 delalloc_start;
         int ret = 0;
         WARN_ON(!PageLocked(page));
         set_page_extent_mapped(page);
         lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-        delalloc_start = page_start;
-        existing_delalloc = count_range_bits(&BTRFS_I(inode)->io_tree,
-                                             &delalloc_start, page_end,
-                                             PAGE_CACHE_SIZE, EXTENT_DELALLOC);
         set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
                             page_end, GFP_NOFS);
-        spin_lock(&root->fs_info->delalloc_lock);
-        root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc;
-        spin_unlock(&root->fs_info->delalloc_lock);
         if (zero_start != PAGE_CACHE_SIZE) {
                 kaddr = kmap(page);
                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
@@ -2456,8 +2456,6 @@ int btrfs_defrag_file(struct file *file) {
         unsigned long ra_index = 0;
         u64 page_start;
         u64 page_end;
-        u64 delalloc_start;
-        u64 existing_delalloc;
         unsigned long i;
         int ret;
@@ -2491,19 +2489,9 @@ int btrfs_defrag_file(struct file *file) {
                 page_end = page_start + PAGE_CACHE_SIZE - 1;
                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-                delalloc_start = page_start;
-                existing_delalloc =
-                        count_range_bits(&BTRFS_I(inode)->io_tree,
-                                         &delalloc_start, page_end,
-                                         PAGE_CACHE_SIZE, EXTENT_DELALLOC);
                 set_extent_delalloc(io_tree, page_start,
                                     page_end, GFP_NOFS);
-                spin_lock(&root->fs_info->delalloc_lock);
-                root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
-                                                 existing_delalloc;
-                spin_unlock(&root->fs_info->delalloc_lock);
                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
                 set_page_dirty(page);
                 unlock_page(page);
...
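
With the hooks in place, fs_info->delalloc_bytes is modified only in btrfs_set_bit_hook and btrfs_clear_bit_hook, always under fs_info->delalloc_lock; the count_range_bits() measurement and the "delalloc accounting error" fallback removed above are no longer needed. Any other code that wants the current total should take the same spinlock. A hypothetical reader, not something this commit adds, might look like:

/* Hypothetical helper: follow the same locking discipline the hooks use
 * before looking at the counter. */
static u64 btrfs_total_delalloc_bytes(struct btrfs_fs_info *fs_info)
{
        u64 bytes;

        spin_lock(&fs_info->delalloc_lock);
        bytes = fs_info->delalloc_bytes;
        spin_unlock(&fs_info->delalloc_lock);
        return bytes;
}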