Commit 00361589 authored by Josef Bacik, committed by Chris Mason

Btrfs: avoid starting a transaction in the write path

I noticed while looking at a deadlock that we are always starting a transaction
in cow_file_range().  This isn't really needed since we only need a transaction
if we are doing an inline extent, or if the allocator needs to allocate a chunk.
So push down all the transaction start stuff to be closer to where we actually
need a transaction in all of these cases.  This will hopefully reduce our write
latency when we are committing often.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
parent 9ffba8cd
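
The key hunk below is in find_free_extent(): the transaction is now joined only on the LOOP_ALLOC_CHUNK branch, used for the forced chunk allocation, and ended right away, so the common allocation path never touches the transaction machinery. As a minimal user-space C sketch of that lazy-join pattern (all types and helper names here are stand-ins for illustration, not the real btrfs API):

/*
 * Sketch of the pattern this patch applies: instead of passing a
 * transaction handle down the whole allocation path, join one lazily
 * only on the branch that actually needs it.  Stand-in types/helpers.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct trans_handle { int id; };	/* stand-in for btrfs_trans_handle */

static struct trans_handle *join_transaction(void)
{
	struct trans_handle *t = malloc(sizeof(*t));
	if (!t)
		return NULL;
	t->id = 1;
	printf("joined transaction\n");
	return t;
}

static void end_transaction(struct trans_handle *t)
{
	printf("ended transaction\n");
	free(t);
}

/* pretend chunk allocation; needs a transaction because it updates metadata */
static int do_chunk_alloc(struct trans_handle *t)
{
	(void)t;
	return 0;
}

/*
 * Search for free space.  Only when we decide to force a new chunk do we
 * join a transaction, use it, and end it immediately; the fast path pays
 * no transaction cost at all.
 */
static int find_free_extent(int need_new_chunk)
{
	struct trans_handle *trans;
	int ret;

	if (!need_new_chunk)
		return 0;		/* fast path: no transaction */

	trans = join_transaction();
	if (!trans)
		return -ENOMEM;

	ret = do_chunk_alloc(trans);
	if (ret < 0 && ret != -ENOSPC)
		fprintf(stderr, "chunk alloc failed: %d\n", ret);
	else
		ret = 0;		/* ENOSPC is not fatal here */
	end_transaction(trans);
	return ret;
}

int main(void)
{
	printf("fast path: %d\n", find_free_extent(0));
	printf("slow path: %d\n", find_free_extent(1));
	return 0;
}

The point of the shape is that only the rare chunk-allocation branch pays for transaction setup and teardown, which is what trims latency on the ordinary write path.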
@@ -3165,10 +3165,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 u64 root_objectid, u64 owner, u64 offset,
 				 struct btrfs_key *ins);
-int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
-			 u64 num_bytes, u64 min_alloc_size,
-			 u64 empty_size, u64 hint_byte,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
+			 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
 			 struct btrfs_key *ins, int is_data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref, int for_cow);
@@ -3612,8 +3610,7 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
 					   size_t pg_offset, u64 start, u64 len,
 					   int create);
-noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
-			      struct inode *inode, u64 offset, u64 *len,
+noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 			      u64 *orig_start, u64 *orig_block_len,
 			      u64 *ram_bytes);
...
@@ -6121,8 +6121,7 @@ enum btrfs_loop_type {
  * ins->offset == number of blocks
  * Any available blocks before search_start are skipped.
  */
-static noinline int find_free_extent(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *orig_root,
+static noinline int find_free_extent(struct btrfs_root *orig_root,
 				     u64 num_bytes, u64 empty_size,
 				     u64 hint_byte, struct btrfs_key *ins,
 				     u64 flags)
@@ -6345,9 +6344,9 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 						block_group->full_stripe_len);

 				/* allocate a cluster in this block group */
-				ret = btrfs_find_space_cluster(trans, root,
-						       block_group, last_ptr,
-						       search_start, num_bytes,
+				ret = btrfs_find_space_cluster(root, block_group,
+						       last_ptr, search_start,
+						       num_bytes,
 						       aligned_cluster);
 				if (ret == 0) {
 					/*
@@ -6479,18 +6478,29 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		index = 0;
 		loop++;
 		if (loop == LOOP_ALLOC_CHUNK) {
+			struct btrfs_trans_handle *trans;
+
+			trans = btrfs_join_transaction(root);
+			if (IS_ERR(trans)) {
+				ret = PTR_ERR(trans);
+				goto out;
+			}
+
 			ret = do_chunk_alloc(trans, root, flags,
 					     CHUNK_ALLOC_FORCE);
 			/*
 			 * Do not bail out on ENOSPC since we
 			 * can do more things.
 			 */
-			if (ret < 0 && ret != -ENOSPC) {
+			if (ret < 0 && ret != -ENOSPC)
 				btrfs_abort_transaction(trans,
 							root, ret);
+			else
+				ret = 0;
+			btrfs_end_transaction(trans, root);
+			if (ret)
 				goto out;
-			}
 		}

 		if (loop == LOOP_NO_EMPTY_SIZE) {
 			empty_size = 0;
@@ -6553,8 +6563,7 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
 	up_read(&info->groups_sem);
 }

-int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
+int btrfs_reserve_extent(struct btrfs_root *root,
 			 u64 num_bytes, u64 min_alloc_size,
 			 u64 empty_size, u64 hint_byte,
 			 struct btrfs_key *ins, int is_data)
@@ -6566,8 +6575,8 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 	flags = btrfs_get_alloc_profile(root, is_data);
 again:
 	WARN_ON(num_bytes < root->sectorsize);
-	ret = find_free_extent(trans, root, num_bytes, empty_size,
-			       hint_byte, ins, flags);
+	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
+			       flags);

 	if (ret == -ENOSPC) {
 		if (!final_tried) {
@@ -6955,7 +6964,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 	if (IS_ERR(block_rsv))
 		return ERR_CAST(block_rsv);

-	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
+	ret = btrfs_reserve_extent(root, blocksize, blocksize,
 				   empty_size, hint, &ins, 0);
 	if (ret) {
 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
...
@@ -1339,7 +1339,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
-	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ordered_extent *ordered;
 	u64 lockstart, lockend;
@@ -1361,16 +1360,8 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 		btrfs_put_ordered_extent(ordered);
 	}

-	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans)) {
-		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
-		return PTR_ERR(trans);
-	}
-
 	num_bytes = lockend - lockstart + 1;
-	ret = can_nocow_extent(trans, inode, lockstart, &num_bytes, NULL, NULL,
-			       NULL);
-	btrfs_end_transaction(trans, root);
+	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
 	if (ret <= 0) {
 		ret = 0;
 	} else {
...
@@ -2525,8 +2525,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
  * returns zero and sets up cluster if things worked out, otherwise
  * it returns -enospc
  */
-int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster,
 			     u64 offset, u64 bytes, u64 empty_size)
...
@@ -98,8 +98,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster,
 			     u64 offset, u64 bytes, u64 empty_size);
...