Commit a66f7989 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Refactor checking of btree topology

Still a lot of work to be done here: we can't yet repair btree topology
issues, but this patch refactors things so that we have better access to
what we need in the topology checks. Next up will be figuring out a way
to do btree updates during gc, before journal replay is done.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e4c3f386
...@@ -51,39 +51,46 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos) ...@@ -51,39 +51,46 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
} }
static int bch2_gc_check_topology(struct bch_fs *c, static int bch2_gc_check_topology(struct bch_fs *c,
struct bkey_s_c k, struct btree *b,
struct bpos *expected_start, struct bkey_buf *prev,
struct bpos expected_end, struct bkey_buf cur,
bool is_last) bool is_last)
{ {
struct bpos node_start = b->data->min_key;
struct bpos node_end = b->data->max_key;
struct bpos expected_start = bkey_deleted(&prev->k->k)
? node_start
: bkey_successor(prev->k->k.p);
char buf1[200], buf2[200];
int ret = 0; int ret = 0;
if (k.k->type == KEY_TYPE_btree_ptr_v2) { if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
if (fsck_err_on(bkey_cmp(*expected_start, bp.v->min_key), c, if (bkey_deleted(&prev->k->k))
"btree node with incorrect min_key: got %llu:%llu, should be %llu:%llu", scnprintf(buf1, sizeof(buf1), "start of node: %llu:%llu",
bp.v->min_key.inode, node_start.inode,
bp.v->min_key.offset, node_start.offset);
expected_start->inode, else
expected_start->offset)) { bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
if (fsck_err_on(bkey_cmp(expected_start, bp->v.min_key), c,
"btree node with incorrect min_key:\n prev %s\n cur %s",
buf1,
(bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2))) {
BUG(); BUG();
} }
} }
*expected_start = bkey_cmp(k.k->p, POS_MAX)
? bkey_successor(k.k->p)
: k.k->p;
if (fsck_err_on(is_last && if (fsck_err_on(is_last &&
bkey_cmp(k.k->p, expected_end), c, bkey_cmp(cur.k->k.p, node_end), c,
"btree node with incorrect max_key: got %llu:%llu, should be %llu:%llu", "btree node with incorrect max_key:\n %s\n expected %s",
k.k->p.inode, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
k.k->p.offset, (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2))) {
expected_end.inode,
expected_end.offset)) {
BUG(); BUG();
} }
bch2_bkey_buf_copy(prev, c, cur.k);
fsck_err: fsck_err:
return ret; return ret;
} }
...@@ -169,10 +176,10 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, ...@@ -169,10 +176,10 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
bool initial) bool initial)
{ {
struct bpos next_node_start = b->data->min_key;
struct btree_node_iter iter; struct btree_node_iter iter;
struct bkey unpacked; struct bkey unpacked;
struct bkey_s_c k; struct bkey_s_c k;
struct bkey_buf prev, cur;
int ret = 0; int ret = 0;
*max_stale = 0; *max_stale = 0;
...@@ -181,6 +188,9 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, ...@@ -181,6 +188,9 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
return 0; return 0;
bch2_btree_node_iter_init_from_start(&iter, b); bch2_btree_node_iter_init_from_start(&iter, b);
bch2_bkey_buf_init(&prev);
bch2_bkey_buf_init(&cur);
bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) { while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
bch2_bkey_debugcheck(c, b, k); bch2_bkey_debugcheck(c, b, k);
...@@ -192,15 +202,17 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, ...@@ -192,15 +202,17 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
bch2_btree_node_iter_advance(&iter, b); bch2_btree_node_iter_advance(&iter, b);
if (b->c.level) { if (b->c.level) {
ret = bch2_gc_check_topology(c, k, bch2_bkey_buf_reassemble(&cur, c, k);
&next_node_start,
b->data->max_key, ret = bch2_gc_check_topology(c, b, &prev, cur,
bch2_btree_node_iter_end(&iter)); bch2_btree_node_iter_end(&iter));
if (ret) if (ret)
break; break;
} }
} }
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
return ret; return ret;
} }
...@@ -267,13 +279,14 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, ...@@ -267,13 +279,14 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
{ {
struct btree_and_journal_iter iter; struct btree_and_journal_iter iter;
struct bkey_s_c k; struct bkey_s_c k;
struct bpos next_node_start = b->data->min_key; struct bkey_buf cur, prev;
struct bkey_buf tmp;
u8 max_stale = 0; u8 max_stale = 0;
int ret = 0; int ret = 0;
bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b); bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
bch2_bkey_buf_init(&tmp); bch2_bkey_buf_init(&prev);
bch2_bkey_buf_init(&cur);
bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
bch2_bkey_debugcheck(c, b, k); bch2_bkey_debugcheck(c, b, k);
...@@ -288,20 +301,19 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, ...@@ -288,20 +301,19 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
if (b->c.level) { if (b->c.level) {
struct btree *child; struct btree *child;
bch2_bkey_buf_reassemble(&tmp, c, k); bch2_bkey_buf_reassemble(&cur, c, k);
k = bkey_i_to_s_c(tmp.k); k = bkey_i_to_s_c(cur.k);
bch2_btree_and_journal_iter_advance(&iter); bch2_btree_and_journal_iter_advance(&iter);
ret = bch2_gc_check_topology(c, k, ret = bch2_gc_check_topology(c, b,
&next_node_start, &prev, cur,
b->data->max_key,
!bch2_btree_and_journal_iter_peek(&iter).k); !bch2_btree_and_journal_iter_peek(&iter).k);
if (ret) if (ret)
break; break;
if (b->c.level > target_depth) { if (b->c.level > target_depth) {
child = bch2_btree_node_get_noiter(c, tmp.k, child = bch2_btree_node_get_noiter(c, cur.k,
b->c.btree_id, b->c.level - 1); b->c.btree_id, b->c.level - 1);
ret = PTR_ERR_OR_ZERO(child); ret = PTR_ERR_OR_ZERO(child);
if (ret) if (ret)
...@@ -319,7 +331,8 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, ...@@ -319,7 +331,8 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
} }
} }
bch2_bkey_buf_exit(&tmp, c); bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment