Commit adbfbcd1 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: apply updated fallocate i_size fix
  Btrfs: do not try and lookup the file extent when finishing ordered io
  Btrfs: Fix oopsen when dropping empty tree.
  Btrfs: remove BUG_ON() due to mounting bad filesystem
  Btrfs: make error return negative in btrfs_sync_file()
  Btrfs: fix race between allocate and release extent buffer.
parents fc76be43 23b5c509
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		ret = btrfs_recover_relocation(tree_root);
-		BUG_ON(ret);
+		if (ret < 0) {
+			printk(KERN_WARNING
+			       "btrfs: failed to recover relocation\n");
+			err = -EINVAL;
+			goto fail_trans_kthread;
+		}
 	}
 
 	location.objectid = BTRFS_FS_TREE_OBJECTID;
...
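Note on the open_ctree() hunk: instead of BUG()ing the machine when btrfs_recover_relocation() fails on a damaged filesystem, the mount is now failed cleanly with -EINVAL through the existing fail_trans_kthread unwind label. Below is a minimal userspace sketch of that goto-based unwind idiom; it is not btrfs code and all names in it are made up.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int recover_state(void)
{
	return -EINVAL;			/* pretend recovery failed */
}

static int open_thing(void)
{
	char *buf = malloc(64);
	int err;

	if (!buf)
		return -ENOMEM;

	err = recover_state();
	if (err < 0) {
		fprintf(stderr, "open_thing: failed to recover state\n");
		goto fail_free_buf;	/* unwind instead of aborting */
	}

	free(buf);
	return 0;

fail_free_buf:
	free(buf);
	return err;
}

int main(void)
{
	return open_thing() ? 1 : 0;
}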
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	int ret;
 
 	while (level >= 0) {
-		if (path->slots[level] >=
-		    btrfs_header_nritems(path->nodes[level]))
-			break;
-
 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (level == 0)
 			break;
 
+		if (path->slots[level] >=
+		    btrfs_header_nritems(path->nodes[level]))
+			break;
+
 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
...
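Note on the walk_down_tree() hunks: the slot bound check used to run at the top of the loop, so a node whose slot was already at btrfs_header_nritems() (e.g. an empty root) made the loop exit before walk_down_proc() ever saw it, which is the oops the commit title refers to. The check now sits after the level-0 exit and only gates the descent via do_walk_down(). A toy sketch of the reordered control flow, assuming a simplified two-level path (not btrfs code):

#include <stdio.h>

struct level {
	int slot;	/* current child index at this level */
	int nritems;	/* number of children at this level */
};

static void walk_down(struct level *path, int level)
{
	while (level >= 0) {
		/* process the node at this level first (walk_down_proc analogue) */
		printf("visit level %d, slot %d of %d\n",
		       level, path[level].slot, path[level].nritems);

		if (level == 0)
			break;		/* reached a leaf */

		/* only now check whether there is a child left to descend into */
		if (path[level].slot >= path[level].nritems)
			break;

		level--;		/* descend (do_walk_down analogue) */
	}
}

int main(void)
{
	struct level path[2] = { { 0, 0 }, { 0, 0 } };	/* an "empty" root */

	walk_down(path, 1);	/* the root is still visited before the loop exits */
	return 0;
}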
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		spin_unlock(&tree->buffer_lock);
 		goto free_eb;
 	}
-	spin_unlock(&tree->buffer_lock);
-
 	/* add one reference for the tree */
 	atomic_inc(&eb->refs);
+	spin_unlock(&tree->buffer_lock);
 	return eb;
 
 free_eb:
...
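Note on the alloc_extent_buffer() hunk: the extra reference for the tree is now taken while tree->buffer_lock is still held, so a concurrent release can no longer free the buffer in the window between the unlock and the atomic_inc(). A userspace sketch of that take-the-reference-under-the-lock rule, using pthreads and C11 atomics; illustrative only, not btrfs code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct buf {
	atomic_int refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *table_entry;			/* "tree" holding one buffer */

static struct buf *lookup_and_ref(void)
{
	struct buf *b;

	pthread_mutex_lock(&table_lock);
	b = table_entry;
	if (b)
		atomic_fetch_add(&b->refs, 1);	/* ref taken under the lock */
	pthread_mutex_unlock(&table_lock);
	return b;
}

static void put_buf(struct buf *b)
{
	if (atomic_fetch_sub(&b->refs, 1) == 1)
		free(b);			/* last reference dropped */
}

int main(void)
{
	table_entry = calloc(1, sizeof(*table_entry));
	atomic_store(&table_entry->refs, 1);	/* reference held by the table */

	struct buf *b = lookup_and_ref();
	put_buf(b);				/* drop our reference */

	pthread_mutex_lock(&table_lock);	/* tear down the table */
	b = table_entry;
	table_entry = NULL;
	pthread_mutex_unlock(&table_lock);
	put_buf(b);
	return 0;
}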
@@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
 	}
 	mutex_lock(&dentry->d_inode->i_mutex);
 out:
-	return ret > 0 ? EIO : ret;
+	return ret > 0 ? -EIO : ret;
 }
 
 static const struct vm_operations_struct btrfs_file_vm_ops = {
...
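Note on the btrfs_sync_file() hunk: kernel callers treat a negative return as an errno and anything >= 0 as success, so a bare positive EIO would slip past their error checks; the one-character fix returns -EIO. A tiny illustration of the convention (plain C, not kernel code):

#include <errno.h>
#include <stdio.h>

static int sync_thing(int fail)
{
	return fail ? -EIO : 0;		/* negative errno on failure */
}

int main(void)
{
	int ret = sync_thing(1);

	if (ret < 0)			/* a positive EIO would not trip this check */
		printf("sync failed: %d\n", ret);
	return 0;
}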
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
  * before we start the transaction. It limits the amount of btree
  * reads required while inside the transaction.
  */
-static noinline void reada_csum(struct btrfs_root *root,
-				struct btrfs_path *path,
-				struct btrfs_ordered_extent *ordered_extent)
-{
-	struct btrfs_ordered_sum *sum;
-	u64 bytenr;
-
-	sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
-			 list);
-	bytenr = sum->sums[0].bytenr;
-
-	/*
-	 * we don't care about the results, the point of this search is
-	 * just to get the btree leaves into ram
-	 */
-	btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	struct btrfs_path *path;
 	int compressed = 0;
 	int ret;
 
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	if (!ret)
 		return 0;
 
-	/*
-	 * before we join the transaction, try to do some of our IO.
-	 * This will limit the amount of IO that we have to do with
-	 * the transaction running. We're unlikely to need to do any
-	 * IO if the file extents are new, the disk_i_size checks
-	 * covers the most common case.
-	 */
-	if (start < BTRFS_I(inode)->disk_i_size) {
-		path = btrfs_alloc_path();
-		if (path) {
-			ret = btrfs_lookup_file_extent(NULL, root, path,
-						       inode->i_ino,
-						       start, 0);
-			ordered_extent = btrfs_lookup_ordered_extent(inode,
-								     start);
-			if (!list_empty(&ordered_extent->list)) {
-				btrfs_release_path(root, path);
-				reada_csum(root, path, ordered_extent);
-			}
-			btrfs_free_path(path);
-		}
-	}
-
-	if (!ordered_extent)
-		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
 	BUG_ON(!ordered_extent);
+
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 		BUG_ON(!list_empty(&ordered_extent->list));
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
 		inode->i_ctime = CURRENT_TIME;
 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-		    cur_offset > inode->i_size) {
+		    (actual_len > inode->i_size) &&
+		    (cur_offset > inode->i_size)) {
+
 			if (cur_offset > actual_len)
 				i_size = actual_len;
 			else
...
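Note on the btrfs_finish_ordered_io() and prealloc_file_range() hunks: the speculative pre-transaction readahead (the reada_csum() helper and the btrfs_lookup_file_extent() probe) is dropped and the ordered extent is simply looked up, and fallocate now only moves i_size when the requested range actually ends past the current i_size, clamping the new size to the requested length rather than to the allocation cursor. A small sketch of that i_size rule follows; it is not btrfs code, and the else branch truncated in the diff is assumed here to use cur_offset.

#include <stdint.h>
#include <stdio.h>

#define KEEP_SIZE 0x01

static uint64_t new_i_size(uint64_t i_size, uint64_t cur_offset,
			   uint64_t actual_len, int mode)
{
	if ((mode & KEEP_SIZE) ||
	    actual_len <= i_size ||	/* request does not extend the file */
	    cur_offset <= i_size)	/* allocation has not passed i_size yet */
		return i_size;

	/* the truncated else branch is assumed to pick cur_offset */
	return cur_offset > actual_len ? actual_len : cur_offset;
}

int main(void)
{
	/* extending a 4096-byte file to 10000 bytes, cursor aligned up to 12288 */
	printf("%llu\n", (unsigned long long)new_i_size(4096, 12288, 10000, 0));
	return 0;
}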
@@ -3764,7 +3764,8 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 				       BTRFS_DATA_RELOC_TREE_OBJECTID);
 		if (IS_ERR(fs_root))
 			err = PTR_ERR(fs_root);
-		btrfs_orphan_cleanup(fs_root);
+		else
+			btrfs_orphan_cleanup(fs_root);
 	}
 	return err;
 }
...
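Note on the btrfs_recover_relocation() hunk: the lookup returns an ERR_PTR-encoded pointer on failure, and the old code passed that straight into btrfs_orphan_cleanup(); the added else keeps the cleanup on the success path only. A userspace sketch of the ERR_PTR / IS_ERR pattern with toy macros (not the kernel's definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* toy ERR_PTR/IS_ERR/PTR_ERR analogues */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

struct root { const char *name; };

static struct root *lookup_root(int fail)
{
	static struct root r = { "data_reloc" };
	return fail ? ERR_PTR(-ENOENT) : &r;
}

static void orphan_cleanup(struct root *root)
{
	printf("cleaning orphans of %s\n", root->name);	/* would crash on an ERR_PTR */
}

int main(void)
{
	long err = 0;
	struct root *root = lookup_root(1);

	if (IS_ERR(root))
		err = PTR_ERR(root);
	else			/* the fix: only clean up a root we actually got */
		orphan_cleanup(root);

	return err ? 1 : 0;
}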