Commit 9785dbdf authored by Chris Mason

Merge branch 'for-chris' of git://git.jan-o-sch.net/btrfs-unstable into integration

parents d756bd2d 6bf7e080
@@ -8,6 +8,6 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o backref.o
+	   reada.o backref.o ulist.o
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
This diff is collapsed.
@@ -20,6 +20,7 @@
 #define __BTRFS_BACKREF__
 #include "ioctl.h"
+#include "ulist.h"
 struct inode_fs_paths {
 	struct btrfs_path *btrfs_path;
@@ -54,6 +55,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+			 struct btrfs_fs_info *fs_info, u64 bytenr,
+			 u64 num_bytes, u64 seq, struct ulist **roots);
+
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 				  struct btrfs_path *path);
...
@@ -240,7 +240,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
 				     new_root_objectid, &disk_key, level,
-				     buf->start, 0);
+				     buf->start, 0, 1);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
@@ -261,9 +261,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
-		ret = btrfs_inc_ref(trans, root, cow, 1);
+		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 	else
-		ret = btrfs_inc_ref(trans, root, cow, 0);
+		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 	if (ret)
 		return ret;
@@ -350,14 +350,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 		if ((owner == root->root_key.objectid ||
 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
-			ret = btrfs_inc_ref(trans, root, buf, 1);
+			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
 			BUG_ON(ret);
 			if (root->root_key.objectid ==
 			    BTRFS_TREE_RELOC_OBJECTID) {
-				ret = btrfs_dec_ref(trans, root, buf, 0);
+				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
 				BUG_ON(ret);
-				ret = btrfs_inc_ref(trans, root, cow, 1);
+				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 				BUG_ON(ret);
 			}
 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -365,9 +365,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			if (root->root_key.objectid ==
 			    BTRFS_TREE_RELOC_OBJECTID)
-				ret = btrfs_inc_ref(trans, root, cow, 1);
+				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
-				ret = btrfs_inc_ref(trans, root, cow, 0);
+				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 			BUG_ON(ret);
 		}
 		if (new_flags != 0) {
@@ -381,11 +381,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
 			if (root->root_key.objectid ==
 			    BTRFS_TREE_RELOC_OBJECTID)
-				ret = btrfs_inc_ref(trans, root, cow, 1);
+				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
-				ret = btrfs_inc_ref(trans, root, cow, 0);
+				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 			BUG_ON(ret);
-			ret = btrfs_dec_ref(trans, root, buf, 1);
+			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 			BUG_ON(ret);
 		}
 		clean_tree_block(trans, root, buf);
@@ -446,7 +446,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
 				     root->root_key.objectid, &disk_key,
-				     level, search_start, empty_size);
+				     level, search_start, empty_size, 1);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
@@ -484,7 +484,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		rcu_assign_pointer(root->node, cow);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
-				      last_ref);
+				      last_ref, 1);
 		free_extent_buffer(buf);
 		add_root_to_dirty_list(root);
 	} else {
@@ -500,7 +500,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
-				      last_ref);
+				      last_ref, 1);
 	}
 	if (unlock_orig)
 		btrfs_tree_unlock(buf);
@@ -957,7 +957,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		free_extent_buffer(mid);
 		root_sub_used(root, mid->len);
-		btrfs_free_tree_block(trans, root, mid, 0, 1);
+		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
 		/* once for the root ptr */
 		free_extent_buffer(mid);
 		return 0;
@@ -1015,7 +1015,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 			if (wret)
 				ret = wret;
 			root_sub_used(root, right->len);
-			btrfs_free_tree_block(trans, root, right, 0, 1);
+			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
 			free_extent_buffer(right);
 			right = NULL;
 		} else {
@@ -1055,7 +1055,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		if (wret)
 			ret = wret;
 		root_sub_used(root, mid->len);
-		btrfs_free_tree_block(trans, root, mid, 0, 1);
+		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
 		free_extent_buffer(mid);
 		mid = NULL;
 	} else {
@@ -2089,7 +2089,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
 				   root->root_key.objectid, &lower_key,
-				   level, root->node->start, 0);
+				   level, root->node->start, 0, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
@@ -2216,7 +2216,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
 				       root->root_key.objectid,
-				       &disk_key, level, c->start, 0);
+				       &disk_key, level, c->start, 0, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
@@ -2970,7 +2970,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
 				       root->root_key.objectid,
-				       &disk_key, 0, l->start, 0);
+				       &disk_key, 0, l->start, 0, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
@@ -3781,7 +3781,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 	root_sub_used(root, leaf->len);
-	btrfs_free_tree_block(trans, root, leaf, 0, 1);
+	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
 	return 0;
 }
 /*
...
@@ -2439,11 +2439,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 					     struct btrfs_root *root, u32 blocksize,
 					     u64 parent, u64 root_objectid,
 					     struct btrfs_disk_key *key, int level,
-					     u64 hint, u64 empty_size);
+					     u64 hint, u64 empty_size, int for_cow);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct extent_buffer *buf,
-			   u64 parent, int last_ref);
+			   u64 parent, int last_ref, int for_cow);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 					    struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize,
@@ -2463,17 +2463,17 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 			 u64 search_end, struct btrfs_key *ins,
 			 u64 data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		  struct extent_buffer *buf, int full_backref);
+		  struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		  struct extent_buffer *buf, int full_backref);
+		  struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
 				u64 bytenr, u64 num_bytes, u64 flags,
 				int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *root,
-		      u64 bytenr, u64 num_bytes, u64 parent,
-		      u64 root_objectid, u64 owner, u64 offset);
+		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+		      u64 owner, u64 offset, int for_cow);
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
@@ -2485,7 +2485,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 u64 bytenr, u64 num_bytes, u64 parent,
-			 u64 root_objectid, u64 owner, u64 offset);
+			 u64 root_objectid, u64 owner, u64 offset, int for_cow);
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root);
@@ -2644,10 +2644,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 }
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+	++p->slots[0];
+	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
+		return btrfs_next_leaf(root, p);
+	return 0;
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 void btrfs_drop_snapshot(struct btrfs_root *root,
-			 struct btrfs_block_rsv *block_rsv, int update_ref);
+			 struct btrfs_block_rsv *block_rsv, int update_ref,
+			 int for_reloc);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       struct extent_buffer *node,
...
This diff is collapsed.
@@ -33,6 +33,9 @@ struct btrfs_delayed_ref_node {
 	/* the size of the extent */
 	u64 num_bytes;
+	/* seq number to keep track of insertion order */
+	u64 seq;
+
 	/* ref count on this data structure */
 	atomic_t refs;
@@ -98,19 +101,15 @@ struct btrfs_delayed_ref_head {
 struct btrfs_delayed_tree_ref {
 	struct btrfs_delayed_ref_node node;
-	union {
-		u64 root;
-		u64 parent;
-	};
+	u64 root;
+	u64 parent;
 	int level;
 };
 struct btrfs_delayed_data_ref {
 	struct btrfs_delayed_ref_node node;
-	union {
-		u64 root;
-		u64 parent;
-	};
+	u64 root;
+	u64 parent;
 	u64 objectid;
 	u64 offset;
 };
@@ -140,6 +139,26 @@ struct btrfs_delayed_ref_root {
 	int flushing;
 	u64 run_delayed_start;
+
+	/*
+	 * seq number of delayed refs. We need to know if a backref was being
+	 * added before the currently processed ref or afterwards.
+	 */
+	u64 seq;
+
+	/*
+	 * seq_list holds a list of all seq numbers that are currently being
+	 * added to the list. While walking backrefs (btrfs_find_all_roots,
+	 * qgroups), which might take some time, no newer ref must be processed,
+	 * as it might influence the outcome of the walk.
+	 */
+	struct list_head seq_head;
+
+	/*
+	 * when the only refs we have in the list must not be processed, we want
+	 * to wait for more refs to show up or for the end of backref walking.
+	 */
+	wait_queue_head_t seq_wait;
 };
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -151,16 +170,21 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 	}
 }
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes, u64 parent,
 			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int for_cow);
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes,
 			       u64 parent, u64 ref_root,
 			       u64 owner, u64 offset, int action,
-			       struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int for_cow);
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+				struct btrfs_trans_handle *trans,
 				u64 bytenr, u64 num_bytes,
 				struct btrfs_delayed_extent_op *extent_op);
@@ -170,6 +194,60 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 			   struct btrfs_delayed_ref_head *head);
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
+
+struct seq_list {
+	struct list_head list;
+	u64 seq;
+};
+
+static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
+{
+	assert_spin_locked(&delayed_refs->lock);
+	++delayed_refs->seq;
+	return delayed_refs->seq;
+}
+
+static inline void
+btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+		      struct seq_list *elem)
+{
+	assert_spin_locked(&delayed_refs->lock);
+	elem->seq = delayed_refs->seq;
+	list_add_tail(&elem->list, &delayed_refs->seq_head);
+}
+
+static inline void
+btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+		      struct seq_list *elem)
+{
+	spin_lock(&delayed_refs->lock);
+	list_del(&elem->list);
+	wake_up(&delayed_refs->seq_wait);
+	spin_unlock(&delayed_refs->lock);
+}
+
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+			    u64 seq);
+
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * this only applies to items in one of the fs-trees. for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
+{
+	if (for_cow)
+		return 0;
+
+	if (rootid == BTRFS_FS_TREE_OBJECTID)
+		return 1;
+
+	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+		return 1;
+
+	return 0;
+}
+
 /*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
...
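Taken together, the seq machinery above is meant to bracket a backref walk: pin the current sequence number, walk while newer delayed refs are held back, then release the pin and wake anything blocked on seq_wait. A minimal sketch of that pattern, using only the helpers declared in this hunk plus btrfs_find_all_roots() from backref.h; the caller walk_backrefs_of() is hypothetical and error handling is elided:

static int walk_backrefs_of(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct seq_list elem;
	struct ulist *roots = NULL;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	/* pin the current seq: refs added after this must be held back */
	spin_lock(&delayed_refs->lock);
	btrfs_get_delayed_seq(delayed_refs, &elem);
	spin_unlock(&delayed_refs->lock);

	/* walk with the pinned seq; refs with a higher seq are ignored */
	ret = btrfs_find_all_roots(trans, fs_info, bytenr, num_bytes,
				   elem.seq, &roots);

	/* drop the pin and wake waiters blocked on seq_wait */
	btrfs_put_delayed_seq(delayed_refs, &elem);
	ulist_free(roots);
	return ret;
}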
@@ -1243,7 +1243,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 	root->ref_cows = 0;
 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
-				      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
+				      BTRFS_TREE_LOG_OBJECTID, NULL,
+				      0, 0, 0, 0);
 	if (IS_ERR(leaf)) {
 		kfree(root);
 		return ERR_CAST(leaf);
...
This diff is collapsed.
@@ -3579,6 +3579,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_set(&eb->blocking_writers, 0);
 	atomic_set(&eb->spinning_readers, 0);
 	atomic_set(&eb->spinning_writers, 0);
+	eb->lock_nested = 0;
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
...
@@ -129,6 +129,7 @@ struct extent_buffer {
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
 	atomic_t refs;
+	pid_t lock_owner;
 	/* count of read lock holders on the extent buffer */
 	atomic_t write_locks;
@@ -137,6 +138,7 @@ struct extent_buffer {
 	atomic_t blocking_readers;
 	atomic_t spinning_readers;
 	atomic_t spinning_writers;
+	int lock_nested;
 	/* protects write locks */
 	rwlock_t lock;
...
@@ -678,7 +678,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 						disk_bytenr, num_bytes, 0,
 						root->root_key.objectid,
 						new_key.objectid,
-						start - extent_offset);
+						start - extent_offset, 0);
 				BUG_ON(ret);
 				*hint_byte = disk_bytenr;
 			}
@@ -753,7 +753,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 						disk_bytenr, num_bytes, 0,
 						root->root_key.objectid,
 						key.objectid, key.offset -
-						extent_offset);
+						extent_offset, 0);
 				BUG_ON(ret);
 				inode_sub_bytes(inode,
 						extent_end - key.offset);
@@ -962,7 +962,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 					   root->root_key.objectid,
-					   ino, orig_offset);
+					   ino, orig_offset, 0);
 		BUG_ON(ret);
 		if (split == start) {
@@ -989,7 +989,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 		del_nr++;
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
-					ino, orig_offset);
+					ino, orig_offset, 0);
 		BUG_ON(ret);
 	}
 	other_start = 0;
@@ -1006,7 +1006,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 		del_nr++;
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
-					ino, orig_offset);
+					ino, orig_offset, 0);
 		BUG_ON(ret);
 	}
 	if (del_nr == 0) {
...
@@ -3179,7 +3179,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			ret = btrfs_free_extent(trans, root, extent_start,
 						extent_num_bytes, 0,
 						btrfs_header_owner(leaf),
-						ino, extent_offset);
+						ino, extent_offset, 0);
 			BUG_ON(ret);
 		}
@@ -5121,7 +5121,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 			}
 			flush_dcache_page(page);
 		} else if (create && PageUptodate(page)) {
-			WARN_ON(1);
+			BUG();
 			if (!trans) {
 				kunmap(page);
 				free_extent_map(em);
...
@@ -368,7 +368,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 		return PTR_ERR(trans);
 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-				      0, objectid, NULL, 0, 0, 0);
+				      0, objectid, NULL, 0, 0, 0, 0);
 	if (IS_ERR(leaf)) {
 		ret = PTR_ERR(leaf);
 		goto fail;
@@ -2468,7 +2468,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 							disko, diskl, 0,
 							root->root_key.objectid,
 							btrfs_ino(inode),
-							new_key.offset - datao);
+							new_key.offset - datao,
+							0);
 					BUG_ON(ret);
 				}
 			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
@@ -3018,7 +3019,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 {
 	int ret = 0;
 	int size;
-	u64 extent_offset;
+	u64 extent_item_pos;
 	struct btrfs_ioctl_logical_ino_args *loi;
 	struct btrfs_data_container *inodes = NULL;
 	struct btrfs_path *path = NULL;
@@ -3049,15 +3050,17 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 	}
 	ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+	btrfs_release_path(path);
 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 		ret = -ENOENT;
 	if (ret < 0)
 		goto out;
-	extent_offset = loi->logical - key.objectid;
+	extent_item_pos = loi->logical - key.objectid;
 	ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
-				    extent_offset, build_ino_list, inodes);
+				    extent_item_pos, build_ino_list,
+				    inodes);
 	if (ret < 0)
 		goto out;
...
@@ -33,6 +33,14 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK) {
 		if (atomic_read(&eb->blocking_writers) == 0) {
 			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -57,6 +65,14 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
 		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 		write_lock(&eb->lock);
@@ -81,12 +97,25 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers) &&
+	    current->pid == eb->lock_owner) {
+		/*
+		 * This extent is already write-locked by our thread. We allow
+		 * an additional read lock to be added because it's for the same
+		 * thread. btrfs_find_all_roots() depends on this as it may be
+		 * called on a partly (write-)locked tree.
+		 */
+		BUG_ON(eb->lock_nested);
+		eb->lock_nested = 1;
+		read_unlock(&eb->lock);
+		return;
+	}
+	read_unlock(&eb->lock);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
 	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
-		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	atomic_inc(&eb->read_locks);
@@ -129,6 +158,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	}
 	atomic_inc(&eb->write_locks);
 	atomic_inc(&eb->spinning_writers);
+	eb->lock_owner = current->pid;
 	return 1;
 }
@@ -137,6 +167,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
 	atomic_dec(&eb->spinning_readers);
@@ -149,6 +188,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
 	if (atomic_dec_and_test(&eb->blocking_readers))
@@ -181,6 +229,7 @@ int btrfs_tree_lock(struct extent_buffer *eb)
 	WARN_ON(atomic_read(&eb->spinning_writers));
 	atomic_inc(&eb->spinning_writers);
 	atomic_inc(&eb->write_locks);
+	eb->lock_owner = current->pid;
 	return 0;
 }
...
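The nested-locking changes above exist so a thread that already holds the (blocking) write lock on a tree block can take an additional read lock on it instead of deadlocking: btrfs_tree_read_lock() spots current->pid == eb->lock_owner and records lock_nested rather than waiting. A sketch of the permitted pattern; do_locked_work() is hypothetical, only the locking calls come from this file:

static void do_locked_work(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* write lock; records lock_owner */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* same thread may now read-lock the buffer it write-locked */
	btrfs_tree_read_lock(eb);	/* sets eb->lock_nested */
	/* ... e.g. a backref walk over the partly write-locked tree ... */
	btrfs_tree_read_unlock(eb);	/* clears lock_nested, returns early */

	btrfs_tree_unlock(eb);		/* drops the blocking write lock */
}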
...@@ -1604,12 +1604,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans, ...@@ -1604,12 +1604,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
ret = btrfs_inc_extent_ref(trans, root, new_bytenr, ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
num_bytes, parent, num_bytes, parent,
btrfs_header_owner(leaf), btrfs_header_owner(leaf),
key.objectid, key.offset); key.objectid, key.offset, 1);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_free_extent(trans, root, bytenr, num_bytes, ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
parent, btrfs_header_owner(leaf), parent, btrfs_header_owner(leaf),
key.objectid, key.offset); key.objectid, key.offset, 1);
BUG_ON(ret); BUG_ON(ret);
} }
if (dirty) if (dirty)
...@@ -1778,21 +1778,23 @@ int replace_path(struct btrfs_trans_handle *trans, ...@@ -1778,21 +1778,23 @@ int replace_path(struct btrfs_trans_handle *trans,
ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize, ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
path->nodes[level]->start, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0); src->root_key.objectid, level - 1, 0,
1);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize, ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
0, dest->root_key.objectid, level - 1, 0, dest->root_key.objectid, level - 1,
0); 0, 1);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_free_extent(trans, src, new_bytenr, blocksize, ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
path->nodes[level]->start, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0); src->root_key.objectid, level - 1, 0,
1);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize, ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1, 0, dest->root_key.objectid, level - 1,
0); 0, 1);
BUG_ON(ret); BUG_ON(ret);
btrfs_unlock_up_safe(path, 0); btrfs_unlock_up_safe(path, 0);
...@@ -2244,7 +2246,7 @@ int merge_reloc_roots(struct reloc_control *rc) ...@@ -2244,7 +2246,7 @@ int merge_reloc_roots(struct reloc_control *rc)
} else { } else {
list_del_init(&reloc_root->root_list); list_del_init(&reloc_root->root_list);
} }
btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
} }
if (found) { if (found) {
...@@ -2558,7 +2560,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, ...@@ -2558,7 +2560,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
node->eb->start, blocksize, node->eb->start, blocksize,
upper->eb->start, upper->eb->start,
btrfs_header_owner(upper->eb), btrfs_header_owner(upper->eb),
node->level, 0); node->level, 0, 1);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_drop_subtree(trans, root, eb, upper->eb); ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
......
@@ -309,7 +309,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	u8 ref_level;
 	unsigned long ptr = 0;
 	const int bufsize = 4096;
-	u64 extent_offset;
+	u64 extent_item_pos;
 	path = btrfs_alloc_path();
@@ -329,12 +329,13 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	if (ret < 0)
 		goto out;
-	extent_offset = swarn.logical - found_key.objectid;
+	extent_item_pos = swarn.logical - found_key.objectid;
 	swarn.extent_item_size = found_key.offset;
 	eb = path->nodes[0];
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+	btrfs_release_path(path);
 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		do {
@@ -351,7 +352,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
 	} else {
 		swarn.path = path;
 		iterate_extent_inodes(fs_info, path, found_key.objectid,
-				      extent_offset,
+				      extent_item_pos,
 				      scrub_print_warning_inode, &swarn);
 	}
...
@@ -36,6 +36,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
 	WARN_ON(atomic_read(&transaction->use_count) == 0);
 	if (atomic_dec_and_test(&transaction->use_count)) {
 		BUG_ON(!list_empty(&transaction->list));
+		WARN_ON(transaction->delayed_refs.root.rb_node);
+		WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
 		memset(transaction, 0, sizeof(*transaction));
 		kmem_cache_free(btrfs_transaction_cachep, transaction);
 	}
@@ -108,8 +110,11 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
 	cur_trans->delayed_refs.num_heads = 0;
 	cur_trans->delayed_refs.flushing = 0;
 	cur_trans->delayed_refs.run_delayed_start = 0;
+	cur_trans->delayed_refs.seq = 1;
+	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
+	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
@@ -1386,9 +1391,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
 		if (btrfs_header_backref_rev(root->node) <
 		    BTRFS_MIXED_BACKREF_REV)
-			btrfs_drop_snapshot(root, NULL, 0);
+			btrfs_drop_snapshot(root, NULL, 0, 0);
 		else
-			btrfs_drop_snapshot(root, NULL, 1);
+			btrfs_drop_snapshot(root, NULL, 1, 0);
 	}
 	return 0;
 }
@@ -589,7 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 				ret = btrfs_inc_extent_ref(trans, root,
 						ins.objectid, ins.offset,
 						0, root->root_key.objectid,
-						key->objectid, offset);
+						key->objectid, offset, 0);
 				BUG_ON(ret);
 			} else {
 				/*
...
/*
* Copyright (C) 2011 STRATO AG
* written by Arne Jansen <sensille@gmx.net>
* Distributed under the GNU GPL license version 2.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include "ulist.h"
/*
* ulist is a generic data structure to hold a collection of unique u64
* values. The only operations it supports is adding to the list and
* enumerating it.
* It is possible to store an auxiliary value along with the key.
*
* The implementation is preliminary and can probably be sped up
* significantly. A first step would be to store the values in an rbtree
* as soon as ULIST_SIZE is exceeded.
*
* A sample usage for ulists is the enumeration of directed graphs without
* visiting a node twice. The pseudo-code could look like this:
*
* ulist = ulist_alloc();
* ulist_add(ulist, root);
* elem = NULL;
*
* while ((elem = ulist_next(ulist, elem))) {
* for (all child nodes n in elem)
* ulist_add(ulist, n);
* do something useful with the node;
* }
* ulist_free(ulist);
*
* This assumes the graph nodes are addressable by u64. This stems from the
* usage for tree enumeration in btrfs, where the logical addresses are
* 64 bit.
*
* It is also useful for tree enumeration which could be done elegantly
* recursively, but is not possible due to kernel stack limitations. The
* loop would be similar to the above.
*/
/**
* ulist_init - freshly initialize a ulist
* @ulist: the ulist to initialize
*
* Note: don't use this function to init an already used ulist, use
* ulist_reinit instead.
*/
void ulist_init(struct ulist *ulist)
{
ulist->nnodes = 0;
ulist->nodes = ulist->int_nodes;
ulist->nodes_alloced = ULIST_SIZE;
}
EXPORT_SYMBOL(ulist_init);
/**
* ulist_fini - free up additionally allocated memory for the ulist
* @ulist: the ulist from which to free the additional memory
*
* This is useful in cases where the base 'struct ulist' has been statically
* allocated.
*/
void ulist_fini(struct ulist *ulist)
{
/*
* The first ULIST_SIZE elements are stored inline in struct ulist.
* Only if more elements are allocated do they need to be freed.
*/
if (ulist->nodes_alloced > ULIST_SIZE)
kfree(ulist->nodes);
ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
}
EXPORT_SYMBOL(ulist_fini);
/**
* ulist_reinit - prepare a ulist for reuse
* @ulist: ulist to be reused
*
* Free up all additional memory allocated for the list elements and reinit
* the ulist.
*/
void ulist_reinit(struct ulist *ulist)
{
ulist_fini(ulist);
ulist_init(ulist);
}
EXPORT_SYMBOL(ulist_reinit);
/**
* ulist_alloc - dynamically allocate a ulist
* @gfp_mask: allocation flags to use for the base allocation
*
* The allocated ulist will be returned in an initialized state.
*/
struct ulist *ulist_alloc(unsigned long gfp_mask)
{
struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
if (!ulist)
return NULL;
ulist_init(ulist);
return ulist;
}
EXPORT_SYMBOL(ulist_alloc);
/**
* ulist_free - free dynamically allocated ulist
* @ulist: ulist to free
*
* It is not necessary to call ulist_fini before.
*/
void ulist_free(struct ulist *ulist)
{
if (!ulist)
return;
ulist_fini(ulist);
kfree(ulist);
}
EXPORT_SYMBOL(ulist_free);
/**
* ulist_add - add an element to the ulist
* @ulist: ulist to add the element to
* @val: value to add to ulist
* @aux: auxiliary value to store along with val
* @gfp_mask: flags to use for allocation
*
* Note: locking must be provided by the caller. In case of rwlocks write
* locking is needed
*
* Add an element to a ulist. The @val will only be added if it doesn't
* already exist. If it is added, the auxiliary value @aux is stored along with
* it. In case @val already exists in the ulist, @aux is ignored, even if
* it differs from the already stored value.
*
* ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
* inserted.
* In case of allocation failure -ENOMEM is returned and the ulist stays
* unaltered.
*/
int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
unsigned long gfp_mask)
{
int i;
for (i = 0; i < ulist->nnodes; ++i) {
if (ulist->nodes[i].val == val)
return 0;
}
if (ulist->nnodes >= ulist->nodes_alloced) {
u64 new_alloced = ulist->nodes_alloced + 128;
struct ulist_node *new_nodes;
void *old = NULL;
/*
* if nodes_alloced == ULIST_SIZE no memory has been allocated
* yet, so pass NULL to krealloc
*/
if (ulist->nodes_alloced > ULIST_SIZE)
old = ulist->nodes;
new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
gfp_mask);
if (!new_nodes)
return -ENOMEM;
if (!old)
memcpy(new_nodes, ulist->int_nodes,
sizeof(ulist->int_nodes));
ulist->nodes = new_nodes;
ulist->nodes_alloced = new_alloced;
}
ulist->nodes[ulist->nnodes].val = val;
ulist->nodes[ulist->nnodes].aux = aux;
++ulist->nnodes;
return 1;
}
EXPORT_SYMBOL(ulist_add);
/**
* ulist_next - iterate ulist
* @ulist: ulist to iterate
* @prev: previously returned element or %NULL to start iteration
*
* Note: locking must be provided by the caller. In case of rwlocks only read
* locking is needed
*
* This function is used to iterate an ulist. The iteration is started with
* @prev = %NULL. It returns the next element from the ulist or %NULL when the
* end is reached. No guarantee is made with respect to the order in which
* the elements are returned. They might neither be returned in order of
* addition nor in ascending order.
* It is allowed to call ulist_add during an enumeration. Newly added items
* are guaranteed to show up in the running enumeration.
*/
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
{
int next;
if (ulist->nnodes == 0)
return NULL;
if (!prev)
return &ulist->nodes[0];
next = (prev - ulist->nodes) + 1;
if (next < 0 || next >= ulist->nnodes)
return NULL;
return &ulist->nodes[next];
}
EXPORT_SYMBOL(ulist_next);
/*
* Copyright (C) 2011 STRATO AG
* written by Arne Jansen <sensille@gmx.net>
* Distributed under the GNU GPL license version 2.
*
*/
#ifndef __ULIST__
#define __ULIST__
/*
* ulist is a generic data structure to hold a collection of unique u64
* values. The only operations it supports is adding to the list and
* enumerating it.
* It is possible to store an auxiliary value along with the key.
*
* The implementation is preliminary and can probably be sped up
* significantly. A first step would be to store the values in an rbtree
* as soon as ULIST_SIZE is exceeded.
*/
/*
* number of elements statically allocated inside struct ulist
*/
#define ULIST_SIZE 16
/*
* element of the list
*/
struct ulist_node {
u64 val; /* value to store */
unsigned long aux; /* auxiliary value saved along with the val */
};
struct ulist {
/*
* number of elements stored in list
*/
unsigned long nnodes;
/*
* number of nodes we already have room for
*/
unsigned long nodes_alloced;
/*
* pointer to the array storing the elements. The first ULIST_SIZE
* elements are stored inline. In this case it points to int_nodes.
* After exceeding ULIST_SIZE, dynamic memory is allocated.
*/
struct ulist_node *nodes;
/*
* inline storage space for the first ULIST_SIZE entries
*/
struct ulist_node int_nodes[ULIST_SIZE];
};
void ulist_init(struct ulist *ulist);
void ulist_fini(struct ulist *ulist);
void ulist_reinit(struct ulist *ulist);
struct ulist *ulist_alloc(unsigned long gfp_mask);
void ulist_free(struct ulist *ulist);
int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
unsigned long gfp_mask);
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
#endif
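For reference, a compilable version of the enumeration pseudo-code from the ulist.c header comment. The child table stands in for a real graph and is entirely made up; only the ulist_* calls are the API defined above. Note how node 4 is reachable twice but stored only once, and how values added mid-enumeration still get visited:

#include <linux/slab.h>
#include "ulist.h"

static u64 example_children[8][2] = {	/* node -> two children, 0 = none */
	[1] = { 2, 3 },
	[2] = { 4, 0 },
	[3] = { 4, 0 },			/* 4 reached twice, added once */
};

static void visit_from(u64 root)
{
	struct ulist *seen = ulist_alloc(GFP_KERNEL);
	struct ulist_node *elem = NULL;
	int i;

	if (!seen)
		return;
	ulist_add(seen, root, 0, GFP_KERNEL);

	/* newly added values show up in the running enumeration */
	while ((elem = ulist_next(seen, elem))) {
		for (i = 0; i < 2; ++i) {
			u64 child = example_children[elem->val][i];

			if (child)
				ulist_add(seen, child, 0, GFP_KERNEL);
		}
		/* do something useful with elem->val here */
	}
	ulist_free(seen);
}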