Commit ca563dcc authored by Kent Overstreet

bcachefs: bch2_trans_unlock() must always be followed by relock() or begin()

We're about to add new asserts for btree_trans locking consistency, and
part of that requires that we aren't using the btree_trans while it's
unlocked.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4984faff
...@@ -2172,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev, ...@@ -2172,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
u64 now; u64 now;
int ret = 0; int ret = 0;
if (bch2_trans_relock(trans))
bch2_trans_begin(trans);
a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr)); a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
ret = PTR_ERR_OR_ZERO(a); ret = PTR_ERR_OR_ZERO(a);
if (ret) if (ret)
......
...@@ -1342,6 +1342,10 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans, ...@@ -1342,6 +1342,10 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
*wp_ret = wp = writepoint_find(trans, write_point.v); *wp_ret = wp = writepoint_find(trans, write_point.v);
ret = bch2_trans_relock(trans);
if (ret)
goto err;
/* metadata may not allocate on cache devices: */ /* metadata may not allocate on cache devices: */
if (wp->data_type != BCH_DATA_user) if (wp->data_type != BCH_DATA_user)
have_cache = true; have_cache = true;
......
...@@ -729,6 +729,8 @@ transaction_restart: \ ...@@ -729,6 +729,8 @@ transaction_restart: \
#define for_each_btree_key_upto(_trans, _iter, _btree_id, \ #define for_each_btree_key_upto(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _do) \ _start, _end, _flags, _k, _do) \
({ \ ({ \
bch2_trans_begin(trans); \
\
struct btree_iter _iter; \ struct btree_iter _iter; \
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \ (_start), (_flags)); \
......
...@@ -737,9 +737,6 @@ static void btree_update_nodes_written(struct btree_update *as) ...@@ -737,9 +737,6 @@ static void btree_update_nodes_written(struct btree_update *as)
*/ */
b = READ_ONCE(as->b); b = READ_ONCE(as->b);
if (b) { if (b) {
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
/* /*
* @b is the node we did the final insert into: * @b is the node we did the final insert into:
* *
...@@ -763,6 +760,10 @@ static void btree_update_nodes_written(struct btree_update *as) ...@@ -763,6 +760,10 @@ static void btree_update_nodes_written(struct btree_update *as)
* have here: * have here:
*/ */
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
bch2_trans_begin(trans);
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
......
...@@ -386,6 +386,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans, ...@@ -386,6 +386,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
while (bio_sectors(bio)) { while (bio_sectors(bio)) {
unsigned sectors = bio_sectors(bio); unsigned sectors = bio_sectors(bio);
bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos, bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots); BTREE_ITER_slots);
ret = lockrestart_do(trans, ({ ret = lockrestart_do(trans, ({
......
...@@ -1036,6 +1036,10 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, ...@@ -1036,6 +1036,10 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_btree_iter_set_pos(&iter, bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors)); POS(iter.pos.inode, iter.pos.offset + sectors));
ret = bch2_trans_relock(trans);
if (ret)
break;
} }
start = iter.pos.offset; start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
......
...@@ -1248,6 +1248,10 @@ static void bch2_nocow_write(struct bch_write_op *op) ...@@ -1248,6 +1248,10 @@ static void bch2_nocow_write(struct bch_write_op *op)
buckets.nr = 0; buckets.nr = 0;
ret = bch2_trans_relock(trans);
if (ret)
break;
k = bch2_btree_iter_peek_slot(&iter); k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k); ret = bkey_err(k);
if (ret) if (ret)
......
...@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, ...@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret))) if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
return ret; return ret;
bch2_trans_begin(trans);
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru, ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0), lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX), lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
......
...@@ -323,6 +323,8 @@ static int do_rebalance(struct moving_context *ctxt) ...@@ -323,6 +323,8 @@ static int do_rebalance(struct moving_context *ctxt)
struct bkey_s_c k; struct bkey_s_c k;
int ret = 0; int ret = 0;
bch2_trans_begin(trans);
bch2_move_stats_init(&r->work_stats, "rebalance_work"); bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan"); bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
......
...@@ -202,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c) ...@@ -202,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c)
struct journal *j = &c->journal; struct journal *j = &c->journal;
u64 start_seq = c->journal_replay_seq_start; u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start; u64 end_seq = c->journal_replay_seq_start;
struct btree_trans *trans = bch2_trans_get(c); struct btree_trans *trans = NULL;
bool immediate_flush = false; bool immediate_flush = false;
int ret = 0; int ret = 0;
...@@ -216,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c) ...@@ -216,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c)
BUG_ON(!atomic_read(&keys->ref)); BUG_ON(!atomic_read(&keys->ref));
move_gap(keys, keys->nr); move_gap(keys, keys->nr);
trans = bch2_trans_get(c);
/* /*
* First, attempt to replay keys in sorted order. This is more * First, attempt to replay keys in sorted order. This is more
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment