Commit ad7ae8d6 authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: Btree locking fix, refactoring

Hit an assertion, probably spurious, indicating an iterator was unlocked
when it shouldn't have been (spurious because it wasn't locked at all
when the caller called btree_insert_at()).

Add a flag, BTREE_ITER_NOUNLOCK, and tighten up the assertions.
Signed-off-by: default avatarKent Overstreet <kent.overstreet@linux.dev>
parent de5bb710
...@@ -1113,7 +1113,6 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter, ...@@ -1113,7 +1113,6 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
/* Free the old nodes and update our sliding window */ /* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) { for (i = 0; i < nr_old_nodes; i++) {
bch2_btree_node_free_inmem(c, old_nodes[i], iter); bch2_btree_node_free_inmem(c, old_nodes[i], iter);
six_unlock_intent(&old_nodes[i]->lock);
/* /*
* the index update might have triggered a split, in which case * the index update might have triggered a split, in which case
......
...@@ -264,10 +264,13 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, ...@@ -264,10 +264,13 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
/* Btree iterator locking: */ /* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify_locks(struct btree_iter *iter) void __bch2_btree_iter_verify_locks(struct btree_iter *iter)
{ {
unsigned l; unsigned l;
BUG_ON((iter->flags & BTREE_ITER_NOUNLOCK) &&
!btree_node_locked(iter, 0));
for (l = 0; btree_iter_node(iter, l); l++) { for (l = 0; btree_iter_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK && if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l)) !btree_node_locked(iter, l))
...@@ -277,6 +280,15 @@ void bch2_btree_iter_verify_locks(struct btree_iter *iter) ...@@ -277,6 +280,15 @@ void bch2_btree_iter_verify_locks(struct btree_iter *iter)
btree_node_locked_type(iter, l)); btree_node_locked_type(iter, l));
} }
} }
void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
struct btree_iter *linked;
for_each_btree_iter(iter, linked)
__bch2_btree_iter_verify_locks(linked);
}
#endif #endif
__flatten __flatten
...@@ -382,9 +394,9 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter, ...@@ -382,9 +394,9 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
break; break;
} }
} }
bch2_btree_iter_verify_locks(linked);
} }
bch2_btree_iter_verify_locks(iter);
} }
int bch2_btree_iter_unlock(struct btree_iter *iter) int bch2_btree_iter_unlock(struct btree_iter *iter)
...@@ -776,9 +788,17 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) ...@@ -776,9 +788,17 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked; struct btree_iter *linked;
unsigned level = b->level; unsigned level = b->level;
/* caller now responsible for unlocking @b */
BUG_ON(iter->l[level].b != b);
BUG_ON(!btree_node_intent_locked(iter, level));
iter->l[level].b = BTREE_ITER_NOT_END;
mark_btree_node_unlocked(iter, level);
for_each_btree_iter(iter, linked) for_each_btree_iter(iter, linked)
if (linked->l[level].b == b) { if (linked->l[level].b == b) {
btree_node_unlock(linked, level); __btree_node_unlock(linked, level);
linked->l[level].b = BTREE_ITER_NOT_END; linked->l[level].b = BTREE_ITER_NOT_END;
} }
} }
......
...@@ -95,7 +95,7 @@ btree_lock_want(struct btree_iter *iter, int level) ...@@ -95,7 +95,7 @@ btree_lock_want(struct btree_iter *iter, int level)
return BTREE_NODE_UNLOCKED; return BTREE_NODE_UNLOCKED;
} }
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level) static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
{ {
int lock_type = btree_node_locked_type(iter, level); int lock_type = btree_node_locked_type(iter, level);
...@@ -106,6 +106,13 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level) ...@@ -106,6 +106,13 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
mark_btree_node_unlocked(iter, level); mark_btree_node_unlocked(iter, level);
} }
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
BUG_ON(!level && iter->flags & BTREE_ITER_NOUNLOCK);
__btree_node_unlock(iter, level);
}
static inline void __bch2_btree_iter_unlock(struct btree_iter *iter) static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{ {
btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK); btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
......
...@@ -192,6 +192,7 @@ enum btree_iter_type { ...@@ -192,6 +192,7 @@ enum btree_iter_type {
*/ */
#define BTREE_ITER_IS_EXTENTS (1 << 4) #define BTREE_ITER_IS_EXTENTS (1 << 4)
#define BTREE_ITER_ERROR (1 << 5) #define BTREE_ITER_ERROR (1 << 5)
#define BTREE_ITER_NOUNLOCK (1 << 6)
enum btree_iter_uptodate { enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0, BTREE_ITER_UPTODATE = 0,
......
...@@ -257,6 +257,11 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b) ...@@ -257,6 +257,11 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b, void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
struct btree_iter *iter) struct btree_iter *iter)
{ {
struct btree_iter *linked;
for_each_btree_iter(iter, linked)
BUG_ON(linked->l[b->level].b == b);
/* /*
* Is this a node that isn't reachable on disk yet? * Is this a node that isn't reachable on disk yet?
* *
...@@ -268,11 +273,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b, ...@@ -268,11 +273,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
*/ */
btree_update_drop_new_node(c, b); btree_update_drop_new_node(c, b);
__bch2_btree_node_lock_write(b, iter); six_lock_write(&b->lock, NULL, NULL);
__btree_node_free(c, b); __btree_node_free(c, b);
six_unlock_write(&b->lock); six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
bch2_btree_iter_node_drop(iter, b);
} }
static void bch2_btree_node_free_ondisk(struct bch_fs *c, static void bch2_btree_node_free_ondisk(struct bch_fs *c,
...@@ -1421,25 +1425,19 @@ static void btree_split(struct btree_update *as, struct btree *b, ...@@ -1421,25 +1425,19 @@ static void btree_split(struct btree_update *as, struct btree *b,
if (n3) if (n3)
bch2_open_buckets_put(c, &n3->ob); bch2_open_buckets_put(c, &n3->ob);
/*
* Note - at this point other linked iterators could still have @b read
* locked; we're depending on the bch2_btree_iter_node_replace() calls
* below removing all references to @b so we don't return with other
* iterators pointing to a node they have locked that's been freed.
*
* We have to free the node first because the bch2_iter_node_replace()
* calls will drop _our_ iterator's reference - and intent lock - to @b.
*/
bch2_btree_node_free_inmem(c, b, iter);
/* Successful split, update the iterator to point to the new nodes: */ /* Successful split, update the iterator to point to the new nodes: */
bch2_btree_iter_node_drop(iter, b);
if (n3) if (n3)
bch2_btree_iter_node_replace(iter, n3); bch2_btree_iter_node_replace(iter, n3);
if (n2) if (n2)
bch2_btree_iter_node_replace(iter, n2); bch2_btree_iter_node_replace(iter, n2);
bch2_btree_iter_node_replace(iter, n1); bch2_btree_iter_node_replace(iter, n1);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_iter_verify_locks(iter);
bch2_time_stats_update(&c->times[BCH_TIME_btree_split], start_time); bch2_time_stats_update(&c->times[BCH_TIME_btree_split], start_time);
} }
...@@ -1735,17 +1733,21 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1735,17 +1733,21 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags); bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
bch2_open_buckets_put(c, &n->ob); bch2_open_buckets_put(c, &n->ob);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter); bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n); bch2_btree_iter_node_replace(iter, n);
bch2_btree_iter_verify(iter, n); bch2_btree_iter_verify(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
bch2_btree_update_done(as); bch2_btree_update_done(as);
six_unlock_intent(&m->lock);
up_read(&c->gc_lock); up_read(&c->gc_lock);
out: out:
bch2_btree_iter_verify_locks(iter);
/* /*
* Don't downgrade locks here: we're called after successful insert, * Don't downgrade locks here: we're called after successful insert,
* and the caller will downgrade locks after a successful insert * and the caller will downgrade locks after a successful insert
...@@ -1828,9 +1830,9 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter, ...@@ -1828,9 +1830,9 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_open_buckets_put(c, &n->ob); bch2_open_buckets_put(c, &n->ob);
bch2_btree_node_free_inmem(c, b, iter); bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n); bch2_btree_iter_node_replace(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_update_done(as); bch2_btree_update_done(as);
return 0; return 0;
......
...@@ -187,7 +187,6 @@ bch2_insert_fixup_key(struct btree_insert *trans, ...@@ -187,7 +187,6 @@ bch2_insert_fixup_key(struct btree_insert *trans,
insert->k)) insert->k))
bch2_btree_journal_key(trans, iter, insert->k); bch2_btree_journal_key(trans, iter, insert->k);
trans->did_work = true;
return BTREE_INSERT_OK; return BTREE_INSERT_OK;
} }
...@@ -338,6 +337,7 @@ static inline int do_btree_insert_at(struct btree_insert *trans, ...@@ -338,6 +337,7 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_insert_entry *i; struct btree_insert_entry *i;
struct btree_iter *linked;
unsigned u64s; unsigned u64s;
int ret; int ret;
...@@ -410,12 +410,25 @@ static inline int do_btree_insert_at(struct btree_insert *trans, ...@@ -410,12 +410,25 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
i->k->k.version = MAX_VERSION; i->k->k.version = MAX_VERSION;
} }
if (trans->flags & BTREE_INSERT_NOUNLOCK) {
/*
* linked iterators that weren't being updated may or may not
* have been traversed/locked, depending on what the caller was
* doing:
*/
for_each_btree_iter(trans->entries[0].iter, linked)
if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
linked->flags |= BTREE_ITER_NOUNLOCK;
}
trans->did_work = true;
trans_for_each_entry(trans, i) { trans_for_each_entry(trans, i) {
switch (btree_insert_key_leaf(trans, i)) { switch (btree_insert_key_leaf(trans, i)) {
case BTREE_INSERT_OK: case BTREE_INSERT_OK:
break; break;
case BTREE_INSERT_NEED_TRAVERSE: case BTREE_INSERT_NEED_TRAVERSE:
BUG_ON((trans->flags & BTREE_INSERT_ATOMIC)); BUG_ON((trans->flags &
(BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
ret = -EINTR; ret = -EINTR;
goto out; goto out;
default: default:
...@@ -461,8 +474,7 @@ int __bch2_btree_insert_at(struct btree_insert *trans) ...@@ -461,8 +474,7 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
BUG_ON(!trans->nr); BUG_ON(!trans->nr);
for_each_btree_iter(trans->entries[0].iter, linked) bch2_btree_iter_verify_locks(trans->entries[0].iter);
bch2_btree_iter_verify_locks(linked);
/* for the sake of sanity: */ /* for the sake of sanity: */
BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC)); BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
...@@ -504,15 +516,11 @@ int __bch2_btree_insert_at(struct btree_insert *trans) ...@@ -504,15 +516,11 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
out: out:
percpu_ref_put(&c->writes); percpu_ref_put(&c->writes);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
/* make sure we didn't drop or screw up locks: */ /* make sure we didn't drop or screw up locks: */
for_each_btree_iter(trans->entries[0].iter, linked) { bch2_btree_iter_verify_locks(trans->entries[0].iter);
bch2_btree_iter_verify_locks(linked);
BUG_ON((trans->flags & BTREE_INSERT_NOUNLOCK) && for_each_btree_iter(trans->entries[0].iter, linked)
trans->did_work && linked->flags &= ~BTREE_ITER_NOUNLOCK;
!btree_node_locked(linked, 0));
}
}
BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR); BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
......
...@@ -1214,7 +1214,6 @@ static void extent_insert_committed(struct extent_insert_state *s) ...@@ -1214,7 +1214,6 @@ static void extent_insert_committed(struct extent_insert_state *s)
bch2_cut_front(s->committed, insert); bch2_cut_front(s->committed, insert);
insert->k.needs_whiteout = false; insert->k.needs_whiteout = false;
s->trans->did_work = true;
} }
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter) void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment