Commit 7f366cfe authored by Chris Mason

Btrfs: reduce stack in cow_file_range

The fs/btrfs/inode.c code that runs delayed allocation during writeout
needed some stack usage optimization.  This is the first pass: it does
the check for compression earlier on, which allows us to handle the
common (no compression) case higher up in the call chain.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent b7ec40d7
@@ -204,7 +204,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
  * does the checks required to make sure the data is small enough
  * to fit as an inline extent.
  */
-static int cow_file_range_inline(struct btrfs_trans_handle *trans,
+static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct inode *inode, u64 start, u64 end,
 				 size_t compressed_size,
@@ -854,11 +854,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 	u64 cur_end;
 	int limit = 10 * 1024 * 1042;
 
-	if (!btrfs_test_opt(root, COMPRESS)) {
-		return cow_file_range(inode, locked_page, start, end,
-				      page_started, nr_written, 1);
-	}
-
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
 			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
 	while (start < end) {
@@ -935,7 +930,8 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
  * If no cow copies or snapshots exist, we write directly to the existing
  * blocks on disk
  */
-static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
+static noinline int run_delalloc_nocow(struct inode *inode,
+				       struct page *locked_page,
 			      u64 start, u64 end, int *page_started, int force,
 			      unsigned long *nr_written)
 {
@@ -1133,6 +1129,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 			      unsigned long *nr_written)
 {
 	int ret;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 
 	if (btrfs_test_flag(inode, NODATACOW))
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1140,10 +1137,12 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 	else if (btrfs_test_flag(inode, PREALLOC))
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, 0, nr_written);
+	else if (!btrfs_test_opt(root, COMPRESS))
+		ret = cow_file_range(inode, locked_page, start, end,
+				     page_started, nr_written, 1);
 	else
 		ret = cow_file_range_async(inode, locked_page, start, end,
 					   page_started, nr_written);
 	return ret;
 }