Commit c43a6ef9 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: btree_bkey_cached_common

This is prep work for the btree key cache: btree iterators will point to
either struct btree, or a new struct bkey_cached.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5e82a9a1
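
The shape of the change, condensed from the diff below: the fields the locking code needs (the six lock, the level, and the btree id) move out of struct btree into a common prefix struct, so the same code can later operate on either a struct btree or a struct bkey_cached. A minimal sketch using only declarations that appear in this commit; the placeholder comment is illustrative:

struct btree_bkey_cached_common {
	struct six_lock	lock;
	u8		level;
	u8		btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;
	/* remaining btree-only fields: hash, key, flags, written, ... */
};

/* field accesses update mechanically, e.g.: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}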
@@ -1529,7 +1529,7 @@ static bool flush_held_btree_writes(struct bch_fs *c)
 			rcu_read_unlock();
 			btree_node_lock_type(c, b, SIX_LOCK_read);
 			bch2_btree_node_write(c, b, SIX_LOCK_read);
-			six_unlock_read(&b->lock);
+			six_unlock_read(&b->c.lock);
 			goto again;
 		} else {
 			nodes_unwritten = true;
...
@@ -27,7 +27,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
 	for (i = 0; i < BTREE_ID_NR; i++)
 		if (c->btree_roots[i].b)
 			reserve += min_t(unsigned, 1,
-					 c->btree_roots[i].b->level) * 8;
+					 c->btree_roots[i].b->c.level) * 8;
 	c->btree_cache.reserve = reserve;
 }
@@ -98,8 +98,8 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 		return NULL;
 	bkey_btree_ptr_init(&b->key);
-	six_lock_init(&b->lock);
-	lockdep_set_novalidate_class(&b->lock);
+	six_lock_init(&b->c.lock);
+	lockdep_set_novalidate_class(&b->c.lock);
 	INIT_LIST_HEAD(&b->list);
 	INIT_LIST_HEAD(&b->write_blocked);
@@ -128,8 +128,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 {
 	int ret;
-	b->level	= level;
-	b->btree_id	= id;
+	b->c.level	= level;
+	b->c.btree_id	= id;
 	mutex_lock(&bc->lock);
 	ret = __bch2_btree_node_hash_insert(bc, b);
@@ -159,10 +159,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	lockdep_assert_held(&bc->lock);
-	if (!six_trylock_intent(&b->lock))
+	if (!six_trylock_intent(&b->c.lock))
 		return -ENOMEM;
-	if (!six_trylock_write(&b->lock))
+	if (!six_trylock_write(&b->c.lock))
 		goto out_unlock_intent;
 	if (btree_node_noevict(b))
@@ -203,9 +203,9 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	trace_btree_node_reap(c, b);
 	return ret;
 out_unlock:
-	six_unlock_write(&b->lock);
+	six_unlock_write(&b->c.lock);
 out_unlock_intent:
-	six_unlock_intent(&b->lock);
+	six_unlock_intent(&b->c.lock);
 	ret = -ENOMEM;
 	goto out;
 }
@@ -263,8 +263,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			if (++i > 3 &&
 			    !btree_node_reclaim(c, b)) {
 				btree_node_data_free(c, b);
-				six_unlock_write(&b->lock);
-				six_unlock_intent(&b->lock);
+				six_unlock_write(&b->c.lock);
+				six_unlock_intent(&b->c.lock);
 				freed++;
 			}
 		}
@@ -290,8 +290,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			mutex_unlock(&bc->lock);
 			bch2_btree_node_hash_remove(bc, b);
-			six_unlock_write(&b->lock);
-			six_unlock_intent(&b->lock);
+			six_unlock_write(&b->c.lock);
+			six_unlock_intent(&b->c.lock);
 			if (freed >= nr)
 				goto out;
@@ -530,8 +530,8 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
 		if (b->data)
 			goto out_unlock;
-		six_unlock_write(&b->lock);
-		six_unlock_intent(&b->lock);
+		six_unlock_write(&b->c.lock);
+		six_unlock_intent(&b->c.lock);
 		goto err;
 	}
@@ -539,8 +539,8 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
 	if (!b)
 		goto err;
-	BUG_ON(!six_trylock_intent(&b->lock));
-	BUG_ON(!six_trylock_write(&b->lock));
+	BUG_ON(!six_trylock_intent(&b->c.lock));
+	BUG_ON(!six_trylock_write(&b->c.lock));
out_unlock:
 	BUG_ON(btree_node_hashed(b));
 	BUG_ON(btree_node_write_in_flight(b));
@@ -611,8 +611,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 		list_add(&b->list, &bc->freeable);
 		mutex_unlock(&bc->lock);
-		six_unlock_write(&b->lock);
-		six_unlock_intent(&b->lock);
+		six_unlock_write(&b->c.lock);
+		six_unlock_intent(&b->c.lock);
 		return NULL;
 	}
@@ -630,15 +630,15 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	bch2_btree_node_read(c, b, sync);
-	six_unlock_write(&b->lock);
+	six_unlock_write(&b->c.lock);
 	if (!sync) {
-		six_unlock_intent(&b->lock);
+		six_unlock_intent(&b->c.lock);
 		return NULL;
 	}
 	if (lock_type == SIX_LOCK_read)
-		six_lock_downgrade(&b->lock);
+		six_lock_downgrade(&b->c.lock);
 	return b;
 }
@@ -727,9 +727,9 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 			return ERR_PTR(-EINTR);
 		if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
-			     b->level != level ||
+			     b->c.level != level ||
 			     race_fault())) {
-			six_unlock_type(&b->lock, lock_type);
+			six_unlock_type(&b->c.lock, lock_type);
 			if (bch2_btree_node_relock(iter, level + 1))
 				goto retry;
@@ -758,11 +758,11 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 	set_btree_node_accessed(b);
 	if (unlikely(btree_node_read_error(b))) {
-		six_unlock_type(&b->lock, lock_type);
+		six_unlock_type(&b->c.lock, lock_type);
 		return ERR_PTR(-EIO);
 	}
-	EBUG_ON(b->btree_id != iter->btree_id ||
+	EBUG_ON(b->c.btree_id != iter->btree_id ||
 		BTREE_NODE_LEVEL(b->data) != level ||
 		bkey_cmp(b->data->max_key, k->k.p));
@@ -780,7 +780,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 	struct bkey_packed *k;
 	BKEY_PADDED(k) tmp;
 	struct btree *ret = NULL;
-	unsigned level = b->level;
+	unsigned level = b->c.level;
 	parent = btree_iter_node(iter, level + 1);
 	if (!parent)
@@ -789,7 +789,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 	if (!bch2_btree_node_relock(iter, level + 1))
 		goto out_upgrade;
-	node_iter = iter->l[parent->level].iter;
+	node_iter = iter->l[parent->c.level].iter;
 	k = bch2_btree_node_iter_peek_all(&node_iter, parent);
 	BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
@@ -836,7 +836,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 			btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
 			if (!IS_ERR(ret)) {
-				six_unlock_intent(&ret->lock);
+				six_unlock_intent(&ret->c.lock);
 				ret = ERR_PTR(-EINTR);
 			}
 		}
@@ -859,7 +859,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 		if (sib != btree_prev_sib)
 			swap(n1, n2);
-		BUG_ON(bkey_cmp(btree_type_successor(n1->btree_id,
+		BUG_ON(bkey_cmp(btree_type_successor(n1->c.btree_id,
 						     n1->key.k.p),
 				n2->data->min_key));
 	}
@@ -904,7 +904,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
 	pr_buf(out,
 	       "l %u %llu:%llu - %llu:%llu:\n"
 	       " ptrs: ",
-	       b->level,
+	       b->c.level,
 	       b->data->min_key.inode,
 	       b->data->min_key.offset,
 	       b->data->max_key.inode,
...
@@ -83,7 +83,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
 	(BTREE_FOREGROUND_MERGE_THRESHOLD(c) +			\
	 (BTREE_FOREGROUND_MERGE_THRESHOLD(c) << 2))
-#define btree_node_root(_c, _b)	((_c)->btree_roots[(_b)->btree_id].b)
+#define btree_node_root(_c, _b)	((_c)->btree_roots[(_b)->c.btree_id].b)
 void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
			     struct btree *);
...
@@ -71,10 +71,10 @@ static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
 static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
				    struct range_checks *r)
 {
-	struct range_level *l = &r->l[b->level];
+	struct range_level *l = &r->l[b->c.level];
 	struct bpos expected_min = bkey_cmp(l->min, l->max)
-		? btree_type_successor(b->btree_id, l->max)
+		? btree_type_successor(b->c.btree_id, l->max)
 		: l->max;
 	bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
@@ -86,8 +86,8 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
 	l->max = b->data->max_key;
-	if (b->level > r->depth) {
-		l = &r->l[b->level - 1];
+	if (b->c.level > r->depth) {
+		l = &r->l[b->c.level - 1];
 		bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
			"btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
@@ -105,7 +105,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
 		if (bkey_cmp(b->data->max_key, POS_MAX))
			l->min = l->max =
-				btree_type_successor(b->btree_id,
+				btree_type_successor(b->c.btree_id,
						     b->data->max_key);
 	}
 }
@@ -261,7 +261,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
 	if (!btree_node_fake(b))
		ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
				       &max_stale, initial);
-	gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+	gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
 	mutex_unlock(&c->btree_root_lock);
 	return ret;
@@ -932,9 +932,9 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
			set_btree_bset_end(n1, n1->set);
-			six_unlock_write(&n2->lock);
+			six_unlock_write(&n2->c.lock);
			bch2_btree_node_free_never_inserted(c, n2);
-			six_unlock_intent(&n2->lock);
+			six_unlock_intent(&n2->c.lock);
			memmove(new_nodes + i - 1,
				new_nodes + i,
@@ -970,7 +970,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
		btree_node_reset_sib_u64s(n);
		bch2_btree_build_aux_trees(n);
-		six_unlock_write(&n->lock);
+		six_unlock_write(&n->c.lock);
		bch2_btree_node_write(c, n, SIX_LOCK_intent);
 	}
@@ -1013,7 +1013,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 	BUG_ON(!bch2_keylist_empty(&keylist));
-	BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
+	BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);
 	bch2_btree_iter_node_replace(iter, new_nodes[0]);
@@ -1035,7 +1035,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
		} else {
			old_nodes[i] = NULL;
			if (new_nodes[i])
-				six_unlock_intent(&new_nodes[i]->lock);
+				six_unlock_intent(&new_nodes[i]->c.lock);
		}
 	}
@@ -1078,11 +1078,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
		for (i = 1; i < GC_MERGE_NODES; i++) {
			if (!merge[i] ||
-			    !six_relock_intent(&merge[i]->lock, lock_seq[i]))
+			    !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
				break;
-			if (merge[i]->level != merge[0]->level) {
-				six_unlock_intent(&merge[i]->lock);
+			if (merge[i]->c.level != merge[0]->c.level) {
+				six_unlock_intent(&merge[i]->c.lock);
				break;
			}
		}
@@ -1091,11 +1091,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
		bch2_coalesce_nodes(c, iter, merge);
		for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
-			lock_seq[i] = merge[i]->lock.state.seq;
-			six_unlock_intent(&merge[i]->lock);
+			lock_seq[i] = merge[i]->c.lock.state.seq;
+			six_unlock_intent(&merge[i]->c.lock);
		}
-		lock_seq[0] = merge[0]->lock.state.seq;
+		lock_seq[0] = merge[0]->c.lock.state.seq;
		if (kthread && kthread_should_stop()) {
			bch2_trans_exit(&trans);
...
@@ -81,7 +81,7 @@ static inline struct gc_pos gc_pos_btree(enum btree_id id,
  */
 static inline struct gc_pos gc_pos_btree_node(struct btree *b)
 {
-	return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
+	return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
 }
 /*
...
@@ -473,8 +473,8 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
 	struct btree_node_entry *bne;
 	bool did_sort;
-	EBUG_ON(!(b->lock.state.seq & 1));
-	EBUG_ON(iter && iter->l[b->level].b != b);
+	EBUG_ON(!(b->c.lock.state.seq & 1));
+	EBUG_ON(iter && iter->l[b->c.level].b != b);
 	did_sort = btree_node_compact(c, b, iter);
@@ -524,8 +524,8 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
		"at btree %u level %u/%u\n"
		"pos %llu:%llu node offset %u",
		write ? "before write " : "",
-		b->btree_id, b->level,
-		c->btree_roots[b->btree_id].level,
+		b->c.btree_id, b->c.level,
+		c->btree_roots[b->c.btree_id].level,
		b->key.k.p.inode, b->key.k.p.offset,
		b->written);
 	if (i)
@@ -610,11 +610,11 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
 	if (i == &b->data->keys) {
		/* These indicate that we read the wrong btree node: */
-		btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
+		btree_err_on(BTREE_NODE_ID(b->data) != b->c.btree_id,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect btree id");
-		btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
+		btree_err_on(BTREE_NODE_LEVEL(b->data) != b->c.level,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect level");
@@ -1105,8 +1105,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 	bch2_btree_set_root_for_read(c, b);
err:
-	six_unlock_write(&b->lock);
-	six_unlock_intent(&b->lock);
+	six_unlock_write(&b->c.lock);
+	six_unlock_intent(&b->c.lock);
 	return ret;
 }
@@ -1153,15 +1153,15 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 	bch2_trans_init(&trans, c);
-	iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
-					BTREE_MAX_DEPTH, b->level, 0);
+	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
+					BTREE_MAX_DEPTH, b->c.level, 0);
retry:
 	ret = bch2_btree_iter_traverse(iter);
 	if (ret)
		goto err;
 	/* has node been freed? */
-	if (iter->l[b->level].b != b) {
+	if (iter->l[b->c.level].b != b) {
		/* node has been freed: */
		BUG_ON(!btree_node_dying(b));
		goto out;
@@ -1359,9 +1359,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
	 * doing btree writes:
	 */
 	if (lock_type_held == SIX_LOCK_intent &&
-	    six_trylock_write(&b->lock)) {
+	    six_trylock_write(&b->c.lock)) {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
-		six_unlock_write(&b->lock);
+		six_unlock_write(&b->c.lock);
 	} else {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
 	}
@@ -1606,18 +1606,18 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	BUG_ON(lock_type_held == SIX_LOCK_write);
 	if (lock_type_held == SIX_LOCK_intent ||
-	    six_lock_tryupgrade(&b->lock)) {
+	    six_lock_tryupgrade(&b->c.lock)) {
		__bch2_btree_node_write(c, b, SIX_LOCK_intent);
		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
-		    six_trylock_write(&b->lock)) {
+		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
-			six_unlock_write(&b->lock);
+			six_unlock_write(&b->c.lock);
		}
		if (lock_type_held == SIX_LOCK_read)
-			six_lock_downgrade(&b->lock);
+			six_lock_downgrade(&b->c.lock);
 	} else {
		__bch2_btree_node_write(c, b, SIX_LOCK_read);
 	}
@@ -1688,7 +1688,7 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
			b,
			(flags & (1 << BTREE_NODE_dirty)) != 0,
			(flags & (1 << BTREE_NODE_need_write)) != 0,
-			b->level,
+			b->c.level,
			b->written,
			!list_empty_careful(&b->write_blocked),
			b->will_make_reachable != 0,
...
@@ -111,7 +111,7 @@ static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b)
			break;
		}
-		six_unlock_read(&b->lock);
+		six_unlock_read(&b->c.lock);
		btree_node_wait_on_io(b);
		btree_node_lock_type(c, b, SIX_LOCK_read);
 	}
...
@@ -54,7 +54,7 @@ static inline int btree_iter_pos_cmp(struct btree_iter *iter,
				     const struct btree *b,
				     const struct bkey_packed *k)
 {
-	return __btree_iter_pos_cmp(iter, b, k, b->level != 0);
+	return __btree_iter_pos_cmp(iter, b, k, b->c.level != 0);
 }
 /* Btree node locking: */
@@ -67,13 +67,13 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
 {
 	struct btree_iter *linked;
-	EBUG_ON(iter->l[b->level].b != b);
-	EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
+	EBUG_ON(iter->l[b->c.level].b != b);
+	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
 	trans_for_each_iter_with_node(iter->trans, b, linked)
-		linked->l[b->level].lock_seq += 2;
-	six_unlock_write(&b->lock);
+		linked->l[b->c.level].lock_seq += 2;
+	six_unlock_write(&b->c.lock);
 }
 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
@@ -81,11 +81,11 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 	struct btree_iter *linked;
 	unsigned readers = 0;
-	EBUG_ON(btree_node_read_locked(iter, b->level));
+	EBUG_ON(btree_node_read_locked(iter, b->c.level));
 	trans_for_each_iter(iter->trans, linked)
-		if (linked->l[b->level].b == b &&
-		    btree_node_read_locked(linked, b->level))
+		if (linked->l[b->c.level].b == b &&
+		    btree_node_read_locked(linked, b->c.level))
			readers++;
 	/*
@@ -95,10 +95,10 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
	 * locked:
	 */
 	atomic64_sub(__SIX_VAL(read_lock, readers),
-		     &b->lock.state.counter);
+		     &b->c.lock.state.counter);
 	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
 	atomic64_add(__SIX_VAL(read_lock, readers),
-		     &b->lock.state.counter);
+		     &b->c.lock.state.counter);
 }
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
@@ -112,8 +112,8 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
 	if (race_fault())
		return false;
-	if (!six_relock_type(&b->lock, want, iter->l[level].lock_seq) &&
-	    !(iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+	if (!six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) &&
+	    !(iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1 &&
	      btree_node_lock_increment(iter, b, level, want)))
		return false;
@@ -137,11 +137,11 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
		return false;
 	if (btree_node_locked(iter, level)
-	    ? six_lock_tryupgrade(&b->lock)
-	    : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+	    ? six_lock_tryupgrade(&b->c.lock)
+	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;
-	if (iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+	if (iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1 &&
	    btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
@@ -378,7 +378,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				btree_node_unlock(linked, l);
			} else {
				if (btree_node_intent_locked(linked, l)) {
-					six_lock_downgrade(&linked->l[l].b->lock);
+					six_lock_downgrade(&linked->l[l].b->c.lock);
					linked->nodes_intent_locked ^= 1 << l;
				}
				break;
@@ -427,7 +427,7 @@ void bch2_btree_trans_unlock(struct btree_trans *trans)
 static void __bch2_btree_iter_verify(struct btree_iter *iter,
				     struct btree *b)
 {
-	struct btree_iter_level *l = &iter->l[b->level];
+	struct btree_iter_level *l = &iter->l[b->c.level];
 	struct btree_node_iter tmp = l->iter;
 	struct bkey_packed *k;
@@ -446,7 +446,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
-	k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
+	k = b->c.level || iter->flags & BTREE_ITER_IS_EXTENTS
		? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
		: bch2_btree_node_iter_prev_all(&tmp, b);
 	if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
@@ -519,7 +519,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
		bch2_btree_node_iter_push(node_iter, b, where, end);
-		if (!b->level &&
+		if (!b->c.level &&
		    node_iter == &iter->l[0].iter)
			bkey_disassemble(b,
				bch2_btree_node_iter_peek_all(node_iter, b),
@@ -548,7 +548,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 	bch2_btree_node_iter_sort(node_iter, b);
-	if (!b->level && node_iter == &iter->l[0].iter) {
+	if (!b->c.level && node_iter == &iter->l[0].iter) {
		/*
		 * not legal to call bkey_debugcheck() here, because we're
		 * called midway through the update path after update has been
@@ -590,7 +590,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
	 * always point to the key for the child node the btree iterator points
	 * to.
	 */
-	if (b->level && new_u64s &&
+	if (b->c.level && new_u64s &&
	    btree_iter_pos_cmp(iter, b, where) > 0) {
		struct bset_tree *t, *where_set = bch2_bkey_to_bset_inlined(b, where);
		struct bkey_packed *k;
@@ -633,13 +633,13 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
 	struct btree_iter *linked;
-	if (node_iter != &iter->l[b->level].iter)
+	if (node_iter != &iter->l[b->c.level].iter)
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);
 	trans_for_each_iter_with_node(iter->trans, b, linked)
		__bch2_btree_node_iter_fix(linked, b,
-					   &linked->l[b->level].iter, t,
+					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
 }
@@ -715,7 +715,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;
-	plevel = b->level + 1;
+	plevel = b->c.level + 1;
 	if (!btree_iter_node(iter, plevel))
		return;
@@ -738,7 +738,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 	}
 	if (!parent_locked)
-		btree_node_unlock(iter, b->level + 1);
+		btree_node_unlock(iter, b->c.level + 1);
 }
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
@@ -751,7 +751,7 @@ static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
 {
-	return iter->btree_id == b->btree_id &&
+	return iter->btree_id == b->c.btree_id &&
		bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
		!btree_iter_pos_after_node(iter, b);
 }
@@ -779,11 +779,11 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
 	btree_iter_verify_new_node(iter, b);
 	EBUG_ON(!btree_iter_pos_in_node(iter, b));
-	EBUG_ON(b->lock.state.seq & 1);
-	iter->l[b->level].lock_seq = b->lock.state.seq;
-	iter->l[b->level].b = b;
-	__btree_iter_init(iter, b->level);
+	EBUG_ON(b->c.lock.state.seq & 1);
+	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+	iter->l[b->c.level].b = b;
+	__btree_iter_init(iter, b->c.level);
 }
 /*
@@ -802,24 +802,24 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
		 * the old node we're replacing has already been
		 * unlocked and the pointer invalidated
		 */
-		BUG_ON(btree_node_locked(linked, b->level));
-		t = btree_lock_want(linked, b->level);
+		BUG_ON(btree_node_locked(linked, b->c.level));
+		t = btree_lock_want(linked, b->c.level);
		if (t != BTREE_NODE_UNLOCKED) {
-			six_lock_increment(&b->lock, (enum six_lock_type) t);
-			mark_btree_node_locked(linked, b->level, (enum six_lock_type) t);
+			six_lock_increment(&b->c.lock, (enum six_lock_type) t);
+			mark_btree_node_locked(linked, b->c.level, (enum six_lock_type) t);
		}
		btree_iter_node_set(linked, b);
 	}
-	six_unlock_intent(&b->lock);
+	six_unlock_intent(&b->c.lock);
 }
 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 {
 	struct btree_iter *linked;
-	unsigned level = b->level;
+	unsigned level = b->c.level;
 	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
@@ -837,7 +837,7 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
 	struct btree_iter *linked;
 	trans_for_each_iter_with_node(iter->trans, b, linked)
-		__btree_iter_init(linked, b->level);
+		__btree_iter_init(linked, b->c.level);
 }
 static inline int btree_iter_lock_root(struct btree_iter *iter,
@@ -852,7 +852,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 	while (1) {
		b = READ_ONCE(c->btree_roots[iter->btree_id].b);
-		iter->level = READ_ONCE(b->level);
+		iter->level = READ_ONCE(b->c.level);
		if (unlikely(iter->level < depth_want)) {
			/*
@@ -872,7 +872,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
			return -EINTR;
		if (likely(b == c->btree_roots[iter->btree_id].b &&
-			   b->level == iter->level &&
+			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NOT_END;
@@ -884,7 +884,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
		}
-		six_unlock_type(&b->lock, lock_type);
+		six_unlock_type(&b->c.lock, lock_type);
 	}
 }
@@ -1842,7 +1842,7 @@ struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
 	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(iter, i))
-			six_lock_increment(&iter->l[i].b->lock,
+			six_lock_increment(&iter->l[i].b->c.lock,
					   __btree_lock_want(iter, i));
 	return &trans->iters[idx];
...
@@ -17,10 +17,23 @@ static inline struct btree *btree_iter_node(struct btree_iter *iter,
 	return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
 }
+static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
+					const struct btree *b, unsigned level)
+{
+	/*
+	 * We don't compare the low bits of the lock sequence numbers because
+	 * @iter might have taken a write lock on @b, and we don't want to skip
+	 * the linked iterator if the sequence numbers were equal before taking
+	 * that write lock. The lock sequence number is incremented by taking
+	 * and releasing write locks and is even when unlocked:
+	 */
+	return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+}
 static inline struct btree *btree_node_parent(struct btree_iter *iter,
					      struct btree *b)
 {
-	return btree_iter_node(iter, b->level + 1);
+	return btree_iter_node(iter, b->c.level + 1);
 }
 static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
@@ -55,16 +68,8 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
 static inline bool __iter_has_node(const struct btree_iter *iter,
				   const struct btree *b)
 {
-	/*
-	 * We don't compare the low bits of the lock sequence numbers because
-	 * @iter might have taken a write lock on @b, and we don't want to skip
-	 * the linked iterator if the sequence numbers were equal before taking
-	 * that write lock. The lock sequence number is incremented by taking
-	 * and releasing write locks and is even when unlocked:
-	 */
-	return iter->l[b->level].b == b &&
-		iter->l[b->level].lock_seq >> 1 == b->lock.state.seq >> 1;
+	return iter->l[b->c.level].b == b &&
+		btree_node_lock_seq_matches(iter, b, b->c.level);
 }
 static inline struct btree_iter *
...
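
An aside on the sequence-number comparison in the new btree_node_lock_seq_matches() helper above: per its comment, a six lock's sequence number is incremented each time a write lock is taken and again when it is released, so it is even while unlocked and odd while write-locked. Comparing seq >> 1 therefore matches on the write-lock generation while ignoring a write lock the iterator itself may currently hold. A worked example with illustrative values (not taken from the commit):

/*
 * b->c.lock.state.seq over time:
 *   4  node unlocked; iterator records lock_seq = 4
 *   5  the same iterator takes the write lock (seq now odd)
 *
 * full compare:    4 != 5            -> linked iterator wrongly skipped
 * shifted compare: 4 >> 1 == 5 >> 1  -> 2 == 2, still a match
 */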
@@ -101,7 +101,7 @@ static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 	if (lock_type != BTREE_NODE_UNLOCKED)
-		six_unlock_type(&iter->l[level].b->lock, lock_type);
+		six_unlock_type(&iter->l[level].b->c.lock, lock_type);
 	mark_btree_node_unlocked(iter, level);
 }
@@ -142,14 +142,14 @@ static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
 {
 	u64 start_time = local_clock();
-	six_lock_type(&b->lock, type, NULL, NULL);
+	six_lock_type(&b->c.lock, type, NULL, NULL);
 	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
 }
 static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
 {
-	if (!six_trylock_type(&b->lock, type))
+	if (!six_trylock_type(&b->c.lock, type))
		__btree_node_lock_type(c, b, type);
 }
@@ -167,7 +167,7 @@ static inline bool btree_node_lock_increment(struct btree_iter *iter,
		if (linked != iter &&
		    linked->l[level].b == b &&
		    btree_node_locked_type(linked, level) >= want) {
-			six_lock_increment(&b->lock, want);
+			six_lock_increment(&b->c.lock, want);
			return true;
		}
@@ -185,7 +185,7 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
 {
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
-	return likely(six_trylock_type(&b->lock, type)) ||
+	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(iter, b, level, type) ||
		__bch2_btree_node_lock(b, pos, level, iter,
				       type, may_drop_locks);
@@ -210,10 +210,10 @@ void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
 static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 {
-	EBUG_ON(iter->l[b->level].b != b);
-	EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
-	if (!six_trylock_write(&b->lock))
+	EBUG_ON(iter->l[b->c.level].b != b);
+	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
+	if (!six_trylock_write(&b->c.lock))
		__bch2_btree_node_lock_write(b, iter);
 }
...
@@ -60,19 +60,22 @@ struct btree_alloc {
 	BKEY_PADDED(k);
 };
+struct btree_bkey_cached_common {
+	struct six_lock		lock;
+	u8			level;
+	u8			btree_id;
+};
 struct btree {
-	/* Hottest entries first */
+	struct btree_bkey_cached_common c;
 	struct rhash_head	hash;
 	/* Key/pointer for this btree node */
 	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-	struct six_lock		lock;
 	unsigned long		flags;
 	u16			written;
-	u8			level;
-	u8			btree_id;
 	u8			nsets;
 	u8			nr_key_bits;
@@ -451,7 +454,7 @@ static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_
 /* Type of keys @b contains: */
 static inline enum btree_node_type btree_node_type(struct btree *b)
 {
-	return __btree_node_type(b->level, b->btree_id);
+	return __btree_node_type(b->c.level, b->c.btree_id);
 }
 static inline bool btree_node_type_is_extents(enum btree_node_type type)
...
...@@ -33,7 +33,7 @@ static void btree_node_interior_verify(struct btree *b) ...@@ -33,7 +33,7 @@ static void btree_node_interior_verify(struct btree *b)
struct btree_node_iter iter; struct btree_node_iter iter;
struct bkey_packed *k; struct bkey_packed *k;
BUG_ON(!b->level); BUG_ON(!b->c.level);
bch2_btree_node_iter_init(&iter, b, &b->key.k.p); bch2_btree_node_iter_init(&iter, b, &b->key.k.p);
#if 1 #if 1
...@@ -229,7 +229,7 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b) ...@@ -229,7 +229,7 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
btree_node_lock_type(c, b, SIX_LOCK_write); btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b); __btree_node_free(c, b);
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
bch2_open_buckets_put(c, &ob); bch2_open_buckets_put(c, &ob);
} }
...@@ -240,7 +240,7 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b, ...@@ -240,7 +240,7 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
struct btree_iter *linked; struct btree_iter *linked;
trans_for_each_iter(iter->trans, linked) trans_for_each_iter(iter->trans, linked)
BUG_ON(linked->l[b->level].b == b); BUG_ON(linked->l[b->c.level].b == b);
/* /*
* Is this a node that isn't reachable on disk yet? * Is this a node that isn't reachable on disk yet?
...@@ -253,10 +253,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b, ...@@ -253,10 +253,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
*/ */
btree_update_drop_new_node(c, b); btree_update_drop_new_node(c, b);
six_lock_write(&b->lock, NULL, NULL); six_lock_write(&b->c.lock, NULL, NULL);
__btree_node_free(c, b); __btree_node_free(c, b);
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
six_unlock_intent(&b->lock); six_unlock_intent(&b->c.lock);
} }
static void bch2_btree_node_free_ondisk(struct bch_fs *c, static void bch2_btree_node_free_ondisk(struct bch_fs *c,
...@@ -387,7 +387,7 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as, ...@@ -387,7 +387,7 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
{ {
struct btree *n; struct btree *n;
n = bch2_btree_node_alloc(as, b->level); n = bch2_btree_node_alloc(as, b->c.level);
n->data->min_key = b->data->min_key; n->data->min_key = b->data->min_key;
n->data->max_key = b->data->max_key; n->data->max_key = b->data->max_key;
...@@ -431,7 +431,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level) ...@@ -431,7 +431,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
btree_node_set_format(b, b->data->format); btree_node_set_format(b, b->data->format);
bch2_btree_build_aux_trees(b); bch2_btree_build_aux_trees(b);
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
return b; return b;
} }
...@@ -445,7 +445,7 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser ...@@ -445,7 +445,7 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser
while (reserve->nr) { while (reserve->nr) {
struct btree *b = reserve->b[--reserve->nr]; struct btree *b = reserve->b[--reserve->nr];
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
if (c->btree_reserve_cache_nr < if (c->btree_reserve_cache_nr <
ARRAY_SIZE(c->btree_reserve_cache)) { ARRAY_SIZE(c->btree_reserve_cache)) {
...@@ -461,9 +461,9 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser ...@@ -461,9 +461,9 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser
btree_node_lock_type(c, b, SIX_LOCK_write); btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b); __btree_node_free(c, b);
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
six_unlock_intent(&b->lock); six_unlock_intent(&b->c.lock);
} }
mutex_unlock(&c->btree_reserve_cache_lock); mutex_unlock(&c->btree_reserve_cache_lock);
...@@ -586,7 +586,7 @@ static void btree_update_nodes_reachable(struct closure *cl) ...@@ -586,7 +586,7 @@ static void btree_update_nodes_reachable(struct closure *cl)
*/ */
btree_node_lock_type(c, b, SIX_LOCK_read); btree_node_lock_type(c, b, SIX_LOCK_read);
bch2_btree_node_write_cond(c, b, btree_node_need_write(b)); bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
mutex_lock(&c->btree_interior_update_lock); mutex_lock(&c->btree_interior_update_lock);
} }
...@@ -641,10 +641,10 @@ static void btree_update_nodes_written(struct closure *cl) ...@@ -641,10 +641,10 @@ static void btree_update_nodes_written(struct closure *cl)
/* The usual case: */ /* The usual case: */
b = READ_ONCE(as->b); b = READ_ONCE(as->b);
if (!six_trylock_read(&b->lock)) { if (!six_trylock_read(&b->c.lock)) {
mutex_unlock(&c->btree_interior_update_lock); mutex_unlock(&c->btree_interior_update_lock);
btree_node_lock_type(c, b, SIX_LOCK_read); btree_node_lock_type(c, b, SIX_LOCK_read);
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
goto retry; goto retry;
} }
...@@ -665,7 +665,7 @@ static void btree_update_nodes_written(struct closure *cl) ...@@ -665,7 +665,7 @@ static void btree_update_nodes_written(struct closure *cl)
* write it now if it needs to be written: * write it now if it needs to be written:
*/ */
bch2_btree_node_write_cond(c, b, true); bch2_btree_node_write_cond(c, b, true);
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
break; break;
case BTREE_INTERIOR_UPDATING_AS: case BTREE_INTERIOR_UPDATING_AS:
...@@ -688,15 +688,15 @@ static void btree_update_nodes_written(struct closure *cl) ...@@ -688,15 +688,15 @@ static void btree_update_nodes_written(struct closure *cl)
/* b is the new btree root: */ /* b is the new btree root: */
b = READ_ONCE(as->b); b = READ_ONCE(as->b);
if (!six_trylock_read(&b->lock)) { if (!six_trylock_read(&b->c.lock)) {
mutex_unlock(&c->btree_interior_update_lock); mutex_unlock(&c->btree_interior_update_lock);
btree_node_lock_type(c, b, SIX_LOCK_read); btree_node_lock_type(c, b, SIX_LOCK_read);
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
goto retry; goto retry;
} }
BUG_ON(c->btree_roots[b->btree_id].as != as); BUG_ON(c->btree_roots[b->c.btree_id].as != as);
c->btree_roots[b->btree_id].as = NULL; c->btree_roots[b->c.btree_id].as = NULL;
bch2_btree_set_root_ondisk(c, b, WRITE); bch2_btree_set_root_ondisk(c, b, WRITE);
...@@ -707,7 +707,7 @@ static void btree_update_nodes_written(struct closure *cl) ...@@ -707,7 +707,7 @@ static void btree_update_nodes_written(struct closure *cl)
* have the pointer to the new root, and before the allocator * have the pointer to the new root, and before the allocator
* can reuse the old nodes it'll have to do a journal commit: * can reuse the old nodes it'll have to do a journal commit:
*/ */
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
mutex_unlock(&c->btree_interior_update_lock); mutex_unlock(&c->btree_interior_update_lock);
/* /*
...@@ -908,8 +908,8 @@ static void btree_interior_update_add_node_reference(struct btree_update *as, ...@@ -908,8 +908,8 @@ static void btree_interior_update_add_node_reference(struct btree_update *as,
d = &as->pending[as->nr_pending++]; d = &as->pending[as->nr_pending++];
d->index_update_done = false; d->index_update_done = false;
d->seq = b->data->keys.seq; d->seq = b->data->keys.seq;
d->btree_id = b->btree_id; d->btree_id = b->c.btree_id;
d->level = b->level; d->level = b->c.level;
bkey_copy(&d->key, &b->key); bkey_copy(&d->key, &b->key);
mutex_unlock(&c->btree_interior_update_lock); mutex_unlock(&c->btree_interior_update_lock);
...@@ -1053,7 +1053,7 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) ...@@ -1053,7 +1053,7 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
mutex_lock(&c->btree_root_lock); mutex_lock(&c->btree_root_lock);
BUG_ON(btree_node_root(c, b) && BUG_ON(btree_node_root(c, b) &&
(b->level < btree_node_root(c, b)->level || (b->c.level < btree_node_root(c, b)->c.level ||
!btree_node_dying(btree_node_root(c, b)))); !btree_node_dying(btree_node_root(c, b))));
btree_node_root(c, b) = b; btree_node_root(c, b) = b;
...@@ -1076,7 +1076,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b) ...@@ -1076,7 +1076,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key), bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
true, 0, &fs_usage->u, 0, 0); true, 0, &fs_usage->u, 0, 0);
if (gc_visited(c, gc_pos_btree_root(b->btree_id))) if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key), bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
true, 0, NULL, 0, true, 0, NULL, 0,
BCH_BUCKET_MARK_GC); BCH_BUCKET_MARK_GC);
...@@ -1094,13 +1094,13 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b) ...@@ -1094,13 +1094,13 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw) static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
{ {
struct btree_root *r = &c->btree_roots[b->btree_id]; struct btree_root *r = &c->btree_roots[b->c.btree_id];
mutex_lock(&c->btree_root_lock); mutex_lock(&c->btree_root_lock);
BUG_ON(b != r->b); BUG_ON(b != r->b);
bkey_copy(&r->key, &b->key); bkey_copy(&r->key, &b->key);
r->level = b->level; r->level = b->c.level;
r->alive = true; r->alive = true;
if (rw == WRITE) if (rw == WRITE)
c->btree_roots_dirty = true; c->btree_roots_dirty = true;
...@@ -1214,7 +1214,7 @@ static struct btree *__btree_split_node(struct btree_update *as, ...@@ -1214,7 +1214,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
struct bset *set1, *set2; struct bset *set1, *set2;
struct bkey_packed *k, *prev = NULL; struct bkey_packed *k, *prev = NULL;
n2 = bch2_btree_node_alloc(as, n1->level); n2 = bch2_btree_node_alloc(as, n1->c.level);
n2->data->max_key = n1->data->max_key; n2->data->max_key = n1->data->max_key;
n2->data->format = n1->format; n2->data->format = n1->format;
...@@ -1251,7 +1251,7 @@ static struct btree *__btree_split_node(struct btree_update *as, ...@@ -1251,7 +1251,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
n1->key.k.p = bkey_unpack_pos(n1, prev); n1->key.k.p = bkey_unpack_pos(n1, prev);
n1->data->max_key = n1->key.k.p; n1->data->max_key = n1->key.k.p;
n2->data->min_key = n2->data->min_key =
btree_type_successor(n1->btree_id, n1->key.k.p); btree_type_successor(n1->c.btree_id, n1->key.k.p);
set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k); set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s)); set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
...@@ -1282,7 +1282,7 @@ static struct btree *__btree_split_node(struct btree_update *as, ...@@ -1282,7 +1282,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n1); bch2_verify_btree_nr_keys(n1);
bch2_verify_btree_nr_keys(n2); bch2_verify_btree_nr_keys(n2);
if (n1->level) { if (n1->c.level) {
btree_node_interior_verify(n1); btree_node_interior_verify(n1);
btree_node_interior_verify(n2); btree_node_interior_verify(n2);
} }
...@@ -1359,7 +1359,7 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1359,7 +1359,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
u64 start_time = local_clock(); u64 start_time = local_clock();
BUG_ON(!parent && (b != btree_node_root(c, b))); BUG_ON(!parent && (b != btree_node_root(c, b)));
BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level)); BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
bch2_btree_interior_update_will_free_node(as, b); bch2_btree_interior_update_will_free_node(as, b);
...@@ -1375,8 +1375,8 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1375,8 +1375,8 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_build_aux_trees(n2); bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1); bch2_btree_build_aux_trees(n1);
six_unlock_write(&n2->lock); six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->lock); six_unlock_write(&n1->c.lock);
bch2_btree_node_write(c, n2, SIX_LOCK_intent); bch2_btree_node_write(c, n2, SIX_LOCK_intent);
...@@ -1390,7 +1390,7 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1390,7 +1390,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
if (!parent) { if (!parent) {
/* Depth increases, make a new root */ /* Depth increases, make a new root */
n3 = __btree_root_alloc(as, b->level + 1); n3 = __btree_root_alloc(as, b->c.level + 1);
n3->sib_u64s[0] = U16_MAX; n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX; n3->sib_u64s[1] = U16_MAX;
...@@ -1403,7 +1403,7 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1403,7 +1403,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
trace_btree_compact(c, b); trace_btree_compact(c, b);
bch2_btree_build_aux_trees(n1); bch2_btree_build_aux_trees(n1);
six_unlock_write(&n1->lock); six_unlock_write(&n1->c.lock);
bch2_keylist_add(&as->parent_keys, &n1->key); bch2_keylist_add(&as->parent_keys, &n1->key);
} }
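For orientation, the shapes the split path above can leave behind — a sketch inferred from the hunks, not a comment from the tree:

/*
 * Root split (no parent):           Split of a node with a parent:
 *
 *       n3  (b->c.level + 1)                 parent
 *      /  \                                 /  |  \
 *    n1    n2  (b->c.level)           ...  n1  n2  ...
 *
 * Compact path (everything still fit in one node after compaction):
 * n1 alone replaces b.  In every case, the keys queued on
 * as->parent_keys are what gets inserted one level up.
 */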
...@@ -1430,7 +1430,7 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1430,7 +1430,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
/* Successful split, update the iterator to point to the new nodes: */ /* Successful split, update the iterator to point to the new nodes: */
six_lock_increment(&b->lock, SIX_LOCK_intent); six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b); bch2_btree_iter_node_drop(iter, b);
if (n3) if (n3)
bch2_btree_iter_node_replace(iter, n3); bch2_btree_iter_node_replace(iter, n3);
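The six_lock_increment() / node_drop / node_replace sequence here recurs at the end of the merge and rewrite paths below. A hedged reading of why the increment must come first:

/*
 * Handoff sketch (my reading of the pattern, not a comment from the
 * tree): the iterator owns one intent reference on b's lock, and the
 * caller must stay locked until it is finished freeing b.
 *
 *	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
 *		// take a second reference; cannot block, since we
 *		// already hold the lock
 *	bch2_btree_iter_node_drop(iter, b);
 *		// iterator releases its reference; the caller's
 *		// reference keeps b locked
 *	bch2_btree_iter_node_replace(iter, n);
 *		// iterator now points at the replacement node
 */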
...@@ -1456,7 +1456,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b, ...@@ -1456,7 +1456,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
struct bkey_packed *k; struct bkey_packed *k;
/* Don't screw up @iter's position: */ /* Don't screw up @iter's position: */
node_iter = iter->l[b->level].iter; node_iter = iter->l[b->c.level].iter;
/* /*
* btree_split(), btree_gc_coalesce() will insert keys before * btree_split(), btree_gc_coalesce() will insert keys before
...@@ -1477,7 +1477,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b, ...@@ -1477,7 +1477,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
btree_update_updated_node(as, b); btree_update_updated_node(as, b);
trans_for_each_iter_with_node(iter->trans, b, linked) trans_for_each_iter_with_node(iter->trans, b, linked)
bch2_btree_node_iter_peek(&linked->l[b->level].iter, b); bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
bch2_btree_iter_verify(iter, b); bch2_btree_iter_verify(iter, b);
} }
...@@ -1503,8 +1503,8 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b, ...@@ -1503,8 +1503,8 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
int old_live_u64s = b->nr.live_u64s; int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added; int live_u64s_added, u64s_added;
BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level)); BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
BUG_ON(!b->level); BUG_ON(!b->c.level);
BUG_ON(!as || as->b); BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys); bch2_verify_keylist_sorted(keys);
...@@ -1541,7 +1541,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b, ...@@ -1541,7 +1541,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
* the btree iterator yet, so the merge path's unlock/wait/relock dance * the btree iterator yet, so the merge path's unlock/wait/relock dance
* won't work: * won't work:
*/ */
bch2_foreground_maybe_merge(c, iter, b->level, bch2_foreground_maybe_merge(c, iter, b->c.level,
flags|BTREE_INSERT_NOUNLOCK); flags|BTREE_INSERT_NOUNLOCK);
return; return;
split: split:
...@@ -1686,7 +1686,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1686,7 +1686,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
b->sib_u64s[sib] = sib_u64s; b->sib_u64s[sib] = sib_u64s;
if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) { if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
six_unlock_intent(&m->lock); six_unlock_intent(&m->c.lock);
goto out; goto out;
} }
...@@ -1716,7 +1716,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1716,7 +1716,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_btree_interior_update_will_free_node(as, b); bch2_btree_interior_update_will_free_node(as, b);
bch2_btree_interior_update_will_free_node(as, m); bch2_btree_interior_update_will_free_node(as, m);
n = bch2_btree_node_alloc(as, b->level); n = bch2_btree_node_alloc(as, b->c.level);
n->data->min_key = prev->data->min_key; n->data->min_key = prev->data->min_key;
n->data->max_key = next->data->max_key; n->data->max_key = next->data->max_key;
...@@ -1729,7 +1729,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1729,7 +1729,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_btree_sort_into(c, n, next); bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n); bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock); six_unlock_write(&n->c.lock);
bkey_init(&delete.k); bkey_init(&delete.k);
delete.k.p = prev->key.k.p; delete.k.p = prev->key.k.p;
...@@ -1742,7 +1742,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1742,7 +1742,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_open_buckets_put(c, &n->ob); bch2_open_buckets_put(c, &n->ob);
six_lock_increment(&b->lock, SIX_LOCK_intent); six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b); bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_drop(iter, m); bch2_btree_iter_node_drop(iter, m);
...@@ -1773,7 +1773,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1773,7 +1773,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
return; return;
err_cycle_gc_lock: err_cycle_gc_lock:
six_unlock_intent(&m->lock); six_unlock_intent(&m->c.lock);
if (flags & BTREE_INSERT_NOUNLOCK) if (flags & BTREE_INSERT_NOUNLOCK)
goto out; goto out;
...@@ -1786,7 +1786,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1786,7 +1786,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
goto err; goto err;
err_unlock: err_unlock:
six_unlock_intent(&m->lock); six_unlock_intent(&m->c.lock);
if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
up_read(&c->gc_lock); up_read(&c->gc_lock);
err: err:
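What the merge hunks above add up to — a reading of the parent update, not comments from the tree:

/*
 * Before the merge the parent holds two entries, one ending at
 * prev->key.k.p and one at next->key.k.p.  The new node n spans both
 * ranges and takes next's max_key, so updating the parent means:
 *
 *	bkey_init(&delete.k);
 *	delete.k.p = prev->key.k.p;	// whiteout: drops the first entry
 *	// ... then insert n->key	// overwrites the second entry
 *
 * after which the iterator is handed over from b and m to n with the
 * same six_lock_increment() dance as in btree_split().
 */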
...@@ -1828,7 +1828,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter, ...@@ -1828,7 +1828,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
n = bch2_btree_node_alloc_replacement(as, b); n = bch2_btree_node_alloc_replacement(as, b);
bch2_btree_build_aux_trees(n); bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock); six_unlock_write(&n->c.lock);
trace_btree_gc_rewrite_node(c, b); trace_btree_gc_rewrite_node(c, b);
...@@ -1843,7 +1843,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter, ...@@ -1843,7 +1843,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_open_buckets_put(c, &n->ob); bch2_open_buckets_put(c, &n->ob);
six_lock_increment(&b->lock, SIX_LOCK_intent); six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b); bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n); bch2_btree_iter_node_replace(iter, n);
bch2_btree_node_free_inmem(c, b, iter); bch2_btree_node_free_inmem(c, b, iter);
...@@ -1963,7 +1963,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c, ...@@ -1963,7 +1963,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
if (new_hash) { if (new_hash) {
bkey_copy(&new_hash->key, &new_key->k_i); bkey_copy(&new_hash->key, &new_key->k_i);
ret = bch2_btree_node_hash_insert(&c->btree_cache, ret = bch2_btree_node_hash_insert(&c->btree_cache,
new_hash, b->level, b->btree_id); new_hash, b->c.level, b->c.btree_id);
BUG_ON(ret); BUG_ON(ret);
} }
...@@ -1996,7 +1996,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c, ...@@ -1996,7 +1996,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i), bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
true, 0, &fs_usage->u, 0, 0); true, 0, &fs_usage->u, 0, 0);
if (gc_visited(c, gc_pos_btree_root(b->btree_id))) if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i), bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
true, 0, NULL, 0, true, 0, NULL, 0,
BCH_BUCKET_MARK_GC); BCH_BUCKET_MARK_GC);
...@@ -2110,8 +2110,8 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter, ...@@ -2110,8 +2110,8 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
list_move(&new_hash->list, &c->btree_cache.freeable); list_move(&new_hash->list, &c->btree_cache.freeable);
mutex_unlock(&c->btree_cache.lock); mutex_unlock(&c->btree_cache.lock);
six_unlock_write(&new_hash->lock); six_unlock_write(&new_hash->c.lock);
six_unlock_intent(&new_hash->lock); six_unlock_intent(&new_hash->c.lock);
} }
up_read(&c->gc_lock); up_read(&c->gc_lock);
closure_sync(&cl); closure_sync(&cl);
...@@ -2151,8 +2151,8 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) ...@@ -2151,8 +2151,8 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
bch2_btree_cache_cannibalize_unlock(c); bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b); set_btree_node_fake(b);
b->level = 0; b->c.level = 0;
b->btree_id = id; b->c.btree_id = id;
bkey_btree_ptr_init(&b->key); bkey_btree_ptr_init(&b->key);
b->key.k.p = POS_MAX; b->key.k.p = POS_MAX;
...@@ -2166,13 +2166,14 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) ...@@ -2166,13 +2166,14 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
b->data->format = bch2_btree_calc_format(b); b->data->format = bch2_btree_calc_format(b);
btree_node_set_format(b, b->data->format); btree_node_set_format(b, b->data->format);
ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id); ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
b->c.level, b->c.btree_id);
BUG_ON(ret); BUG_ON(ret);
__bch2_btree_set_root_inmem(c, b); __bch2_btree_set_root_inmem(c, b);
six_unlock_write(&b->lock); six_unlock_write(&b->c.lock);
six_unlock_intent(&b->lock); six_unlock_intent(&b->c.lock);
} }
ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf) ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
......
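On the bch2_btree_root_alloc() hunk just above, a hedged note on the fake-node pattern, inferred from set_btree_node_fake() and the POS_MAX key:

/*
 * Reading, not from the tree: a freshly allocated root for an empty
 * btree is marked fake — an in-memory placeholder covering the whole
 * key range (max_key == POS_MAX) — so lookups have a node to land on
 * before any real update creates on-disk contents.  Its level and
 * btree_id are set through b->c like everywhere else in this commit.
 */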
...@@ -190,7 +190,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id); ...@@ -190,7 +190,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
static inline unsigned btree_update_reserve_required(struct bch_fs *c, static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b) struct btree *b)
{ {
unsigned depth = btree_node_root(c, b)->level + 1; unsigned depth = btree_node_root(c, b)->c.level + 1;
/* /*
* Number of nodes we might have to allocate in a worst case btree * Number of nodes we might have to allocate in a worst case btree
...@@ -198,9 +198,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c, ...@@ -198,9 +198,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
* a new root, unless we're already at max depth: * a new root, unless we're already at max depth:
*/ */
if (depth < BTREE_MAX_DEPTH) if (depth < BTREE_MAX_DEPTH)
return (depth - b->level) * 2 + 1; return (depth - b->c.level) * 2 + 1;
else else
return (depth - b->level) * 2 - 1; return (depth - b->c.level) * 2 - 1;
} }
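A worked instance of the formula above, with hypothetical numbers:

/*
 * Hypothetical example: root at level 2, b a leaf at level 0, and
 * BTREE_MAX_DEPTH not yet reached:
 *
 *	depth   = 2 + 1 = 3
 *	reserve = (3 - 0) * 2 + 1 = 7
 *
 * i.e. two new nodes at each of levels 0..2 if every split cascades
 * all the way up, plus one more in case the tree grows a new root.
 * At maximum depth the root is not allowed to split, hence the
 * smaller reserve in the else branch.
 */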
static inline void btree_node_reset_sib_u64s(struct btree *b) static inline void btree_node_reset_sib_u64s(struct btree *b)
......
...@@ -155,7 +155,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin, ...@@ -155,7 +155,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
btree_node_lock_type(c, b, SIX_LOCK_read); btree_node_lock_type(c, b, SIX_LOCK_read);
bch2_btree_node_write_cond(c, b, bch2_btree_node_write_cond(c, b,
(btree_current_write(b) == w && w->journal.seq == seq)); (btree_current_write(b) == w && w->journal.seq == seq));
six_unlock_read(&b->lock); six_unlock_read(&b->c.lock);
} }
static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq) static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
...@@ -198,7 +198,7 @@ void bch2_btree_journal_key(struct btree_trans *trans, ...@@ -198,7 +198,7 @@ void bch2_btree_journal_key(struct btree_trans *trans,
struct btree *b = iter->l[0].b; struct btree *b = iter->l[0].b;
struct btree_write *w = btree_current_write(b); struct btree_write *w = btree_current_write(b);
EBUG_ON(iter->level || b->level); EBUG_ON(iter->level || b->c.level);
EBUG_ON(trans->journal_res.ref != EBUG_ON(trans->journal_res.ref !=
!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)); !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
......
...@@ -52,8 +52,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) ...@@ -52,8 +52,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
bkey_copy(&v->key, &b->key); bkey_copy(&v->key, &b->key);
v->written = 0; v->written = 0;
v->level = b->level; v->c.level = b->c.level;
v->btree_id = b->btree_id; v->c.btree_id = b->c.btree_id;
bch2_btree_keys_init(v, &c->expensive_debug_checks); bch2_btree_keys_init(v, &c->expensive_debug_checks);
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
......
...@@ -144,8 +144,8 @@ DECLARE_EVENT_CLASS(btree_node, ...@@ -144,8 +144,8 @@ DECLARE_EVENT_CLASS(btree_node,
TP_fast_assign( TP_fast_assign(
memcpy(__entry->uuid, c->sb.user_uuid.b, 16); memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->level = b->level; __entry->level = b->c.level;
__entry->id = b->btree_id; __entry->id = b->c.btree_id;
__entry->inode = b->key.k.p.inode; __entry->inode = b->key.k.p.inode;
__entry->offset = b->key.k.p.offset; __entry->offset = b->key.k.p.offset;
), ),
...@@ -262,7 +262,7 @@ TRACE_EVENT(btree_insert_key, ...@@ -262,7 +262,7 @@ TRACE_EVENT(btree_insert_key,
), ),
TP_fast_assign( TP_fast_assign(
__entry->id = b->btree_id; __entry->id = b->c.btree_id;
__entry->inode = k->k.p.inode; __entry->inode = k->k.p.inode;
__entry->offset = k->k.p.offset; __entry->offset = k->k.p.offset;
__entry->size = k->k.size; __entry->size = k->k.size;
......