Commit 216c9fac authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Pass around bset_tree less

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent fc3268c1
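
The pattern the diff below adopts can be shown in isolation: instead of threading a struct bset_tree * argument through every call site, a helper recomputes which set a key belongs to from the key's offset inside the node, and callers pass only the node and the key. The following is a minimal standalone sketch of that idea; the node/bset_range types and the find_containing_set()/fix_key() names are simplified stand-ins invented for illustration, not the real bcachefs structures or functions (compare bch2_bkey_to_bset_inlined() in the diff).

/*
 * Standalone sketch: derive the containing set from the key instead of
 * passing the set around. All types and helpers here are simplified
 * stand-ins, not the real bcachefs bset_tree/btree structures.
 */
#include <assert.h>
#include <stdio.h>

struct bset_range {
	unsigned start_offset;	/* first key offset in this set */
	unsigned end_offset;	/* one past the last key offset */
};

struct node {
	unsigned nr_sets;
	struct bset_range set[4];
	unsigned keys[64];	/* key "storage"; offsets index into this */
};

/*
 * Same spirit as bch2_bkey_to_bset_inlined(): walk the sets and return the
 * one whose offset range contains the key, so callers no longer have to
 * remember it and pass it themselves.
 */
static struct bset_range *find_containing_set(struct node *b, unsigned *key)
{
	unsigned offset = (unsigned) (key - b->keys);

	for (unsigned i = 0; i < b->nr_sets; i++)
		if (offset < b->set[i].end_offset) {
			assert(offset >= b->set[i].start_offset);
			return &b->set[i];
		}
	assert(0);
	return NULL;
}

/* A caller now needs only the node and the key: */
static void fix_key(struct node *b, unsigned *key)
{
	unsigned offset = (unsigned) (key - b->keys);
	struct bset_range *t = find_containing_set(b, key);

	printf("key at offset %u lives in set [%u, %u)\n",
	       offset, t->start_offset, t->end_offset);
}

int main(void)
{
	struct node b = {
		.nr_sets = 2,
		.set = { { 0, 10 }, { 10, 20 } },
	};

	fix_key(&b, &b.keys[3]);	/* falls in the first set */
	fix_key(&b, &b.keys[15]);	/* falls in the second set */
	return 0;
}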
@@ -23,16 +23,7 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
unsigned offset = __btree_node_key_to_offset(b, k);
struct bset_tree *t;
for_each_bset(b, t)
if (offset <= t->end_offset) {
EBUG_ON(offset < btree_bkey_first_offset(t));
return t;
}
BUG();
return bch2_bkey_to_bset_inlined(b, k);
}
/*
@@ -1117,9 +1108,10 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
* modified, fix any auxiliary search tree by remaking all the nodes in the
* auxiliary search tree that @k corresponds to
*/
void bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
struct bkey_packed *k)
void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
{
struct bset_tree *t = bch2_bkey_to_bset_inlined(b, k);
switch (bset_aux_tree_type(t)) {
case BSET_NO_AUX_TREE:
break;
@@ -343,8 +343,7 @@ void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct bch_fs *, struct btree *,
struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
void bch2_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
struct bkey_packed *);
void bch2_bset_fix_invalidated_key(struct btree *, struct bkey_packed *);
void bch2_bset_insert(struct btree *, struct btree_node_iter *,
struct bkey_packed *, struct bkey_i *, unsigned);
@@ -404,6 +403,21 @@ static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b,
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}
static inline struct bset_tree *
bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
{
unsigned offset = __btree_node_key_to_offset(b, k);
struct bset_tree *t;
for_each_bset(b, t)
if (offset <= t->end_offset) {
EBUG_ON(offset < btree_bkey_first_offset(t));
return t;
}
BUG();
}
struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);
struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
@@ -605,6 +619,13 @@ static inline void btree_keys_account_key(struct btree_nr_keys *n,
#define btree_keys_account_key_drop(_nr, _bset_idx, _k) \
btree_keys_account_key(_nr, _bset_idx, _k, -1)
#define btree_account_key_add(_b, _k) \
btree_keys_account_key(&(_b)->nr, \
bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
#define btree_account_key_drop(_b, _k) \
btree_keys_account_key(&(_b)->nr, \
bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
struct bset_stats {
struct {
size_t nr, bytes;
@@ -598,15 +598,14 @@ void bch2_gc(struct bch_fs *c)
static void recalc_packed_keys(struct btree *b)
{
struct bset *i = btree_bset_first(b);
struct bkey_packed *k;
memset(&b->nr, 0, sizeof(b->nr));
BUG_ON(b->nsets != 1);
for (k = btree_bkey_first(b, b->set);
k != btree_bkey_last(b, b->set);
k = bkey_next(k))
vstruct_for_each(i, k)
btree_keys_account_key_add(&b->nr, 0, k);
}
@@ -519,11 +519,11 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
if (b->level && new_u64s && !bkey_deleted(where) &&
btree_iter_pos_cmp_packed(b, &iter->pos, where,
iter->flags & BTREE_ITER_IS_EXTENTS)) {
struct bset_tree *t;
struct bset_tree *t, *where_set = bch2_bkey_to_bset_inlined(b, where);
struct bkey_packed *k;
for_each_bset(b, t) {
if (bch2_bkey_to_bset(b, where) == t)
if (where_set == t)
continue;
k = bch2_bkey_prev_all(b, t,
@@ -551,13 +551,13 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bset_tree *t,
struct bkey_packed *where,
unsigned clobber_u64s,
unsigned new_u64s)
struct btree *b,
struct btree_node_iter *node_iter,
struct bkey_packed *where,
unsigned clobber_u64s,
unsigned new_u64s)
{
struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
struct btree_iter *linked;
if (node_iter != &iter->l[b->level].iter)
@@ -99,8 +99,8 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
struct btree_node_iter *, struct bset_tree *,
struct bkey_packed *, unsigned, unsigned);
struct btree_node_iter *, struct bkey_packed *,
unsigned, unsigned);
int bch2_btree_iter_unlock(struct btree_iter *);
@@ -25,7 +25,6 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
{
const struct bkey_format *f = &b->format;
struct bkey_packed *k;
struct bset_tree *t;
unsigned clobber_u64s;
EBUG_ON(btree_node_just_written(b));
@@ -38,8 +37,6 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
if (k && !bkey_cmp_packed(b, k, &insert->k)) {
BUG_ON(bkey_whiteout(k));
t = bch2_bkey_to_bset(b, k);
if (!bkey_written(b, k) &&
bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
!bkey_whiteout(&insert->k)) {
@@ -51,9 +48,9 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
insert->k.needs_whiteout = k->needs_whiteout;
btree_keys_account_key_drop(&b->nr, t - b->set, k);
btree_account_key_drop(b, k);
if (t == bset_tree_last(b)) {
if (k >= btree_bset_last(b)->start) {
clobber_u64s = k->u64s;
/*
@@ -63,7 +60,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
*/
if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
bch2_bset_delete(b, k, clobber_u64s);
bch2_btree_node_iter_fix(iter, b, node_iter, t,
bch2_btree_node_iter_fix(iter, b, node_iter,
k, clobber_u64s, 0);
bch2_btree_iter_verify(iter, b);
return true;
@@ -73,7 +70,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
}
k->type = KEY_TYPE_DELETED;
bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
bch2_btree_node_iter_fix(iter, b, node_iter, k,
k->u64s, k->u64s);
bch2_btree_iter_verify(iter, b);
@@ -93,13 +90,12 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
insert->k.needs_whiteout = false;
}
t = bset_tree_last(b);
k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
clobber_u64s = 0;
overwrite:
bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
bch2_btree_node_iter_fix(iter, b, node_iter, k,
clobber_u64s, k->u64s);
bch2_btree_iter_verify(iter, b);
return true;
@@ -1171,7 +1171,6 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
struct bkey_i *insert)
{
struct btree_iter_level *l = &iter->l[0];
struct bset_tree *t = bset_tree_last(l->b);
struct btree_node_iter node_iter;
struct bkey_packed *k;
@@ -1192,10 +1191,10 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
return;
k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, t);
k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
bch2_bset_insert(l->b, &l->iter, k, insert, 0);
bch2_btree_node_iter_fix(iter, l->b, &l->iter, t, k, 0, k->u64s);
bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
bch2_btree_iter_verify(iter, l->b);
}
@@ -1328,20 +1327,19 @@ bch2_extent_can_insert(struct btree_insert *trans,
static void
extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
struct bkey_packed *_k, struct bkey_s k,
enum bch_extent_overlap overlap)
{
struct bch_fs *c = s->trans->c;
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
struct btree *b = l->b;
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, _k, k.k);
extent_save(l->b, _k, k.k);
verify_modified_extent(iter, _k);
break;
@@ -1349,15 +1347,15 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
/* insert overlaps with end of k: */
bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, _k, k.k);
extent_save(l->b, _k, k.k);
/*
* As the auxiliary tree is indexed by the end of the
* key and we've just changed the end, update the
* auxiliary tree.
*/
bch2_bset_fix_invalidated_key(b, t, _k);
bch2_btree_node_iter_fix(iter, b, &l->iter, t,
bch2_bset_fix_invalidated_key(l->b, _k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
verify_modified_extent(iter, _k);
break;
@@ -1365,21 +1363,20 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
case BCH_EXTENT_OVERLAP_ALL: {
/* The insert key completely covers k, invalidate k */
if (!bkey_whiteout(k.k))
btree_keys_account_key_drop(&b->nr,
t - b->set, _k);
btree_account_key_drop(l->b, _k);
bch2_drop_subtract(s, k);
if (t == bset_tree_last(l->b)) {
if (_k >= btree_bset_last(l->b)->start) {
unsigned u64s = _k->u64s;
bch2_bset_delete(l->b, _k, _k->u64s);
bch2_btree_node_iter_fix(iter, b, &l->iter, t,
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, u64s, 0);
bch2_btree_iter_verify(iter, b);
bch2_btree_iter_verify(iter, l->b);
} else {
extent_save(b, _k, k.k);
bch2_btree_node_iter_fix(iter, b, &l->iter, t,
extent_save(l->b, _k, k.k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
verify_modified_extent(iter, _k);
}
@@ -1403,14 +1400,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
* what k points to)
*/
bkey_reassemble(&split.k, k.s_c);
split.k.k.needs_whiteout |= bkey_written(b, _k);
split.k.k.needs_whiteout |= bkey_written(l->b, _k);
bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, _k, k.k);
extent_save(l->b, _k, k.k);
verify_modified_extent(iter, _k);
bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
@@ -1426,16 +1423,14 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
{
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
struct btree *b = l->b;
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
while (bkey_cmp(s->committed, insert->k.p) < 0 &&
(_k = bch2_btree_node_iter_peek_filter(&l->iter, b,
(_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
KEY_TYPE_DISCARD))) {
struct bset_tree *t = bch2_bkey_to_bset(b, _k);
struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
@@ -1465,16 +1460,16 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
!bkey_cmp(insert->k.p, k.k->p) &&
!bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
if (!bkey_whiteout(k.k)) {
btree_keys_account_key_drop(&b->nr, t - b->set, _k);
btree_account_key_drop(l->b, _k);
bch2_subtract_sectors(s, k.s_c,
bkey_start_offset(k.k), k.k->size);
_k->type = KEY_TYPE_DISCARD;
reserve_whiteout(b, _k);
reserve_whiteout(l->b, _k);
}
break;
}
if (k.k->needs_whiteout || bkey_written(b, _k)) {
if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
insert->k.needs_whiteout = true;
s->update_btree = true;
}
@@ -1483,11 +1478,11 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
overlap == BCH_EXTENT_OVERLAP_ALL &&
bkey_whiteout(k.k) &&
k.k->needs_whiteout) {
unreserve_whiteout(b, _k);
unreserve_whiteout(l->b, _k);
_k->needs_whiteout = false;
}
extent_squash(s, insert, t, _k, k, overlap);
extent_squash(s, insert, _k, k, overlap);
if (!s->update_btree)
bch2_cut_front(s->committed, insert);
@@ -1498,7 +1493,7 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
}
if (bkey_cmp(s->committed, insert->k.p) < 0)
s->committed = bpos_min(s->insert->k->k.p, b->key.k.p);
s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
/*
* may have skipped past some deleted extents greater than the insert
@@ -1509,7 +1504,7 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
struct btree_node_iter node_iter = l->iter;
while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
bkey_cmp_left_packed(b, _k, &s->committed) > 0)
bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
l->iter = node_iter;
}
}
@@ -2142,9 +2137,9 @@ static bool bch2_extent_merge_inline(struct bch_fs *c,
return false;
}
bch2_bset_fix_invalidated_key(b, t, m);
bch2_bset_fix_invalidated_key(b, m);
bch2_btree_node_iter_fix(iter, b, node_iter,
t, m, m->u64s, m->u64s);
m, m->u64s, m->u64s);
verify_modified_extent(iter, m);
return ret == BCH_MERGE_MERGE;