Commit 1fe08f31 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: bkey_written()

Also clean up the btree node offset helpers.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 617391ba
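
In brief: whether a bset (or, now, a single key) has been written out is just an address comparison against write_block(b), the first byte past the b->written 512-byte sectors already on disk. The patch factors that check into __btree_addr_written(), turns every bset_unwritten(b, i) into !bset_written(b, i), and adds bkey_written() so the whiteout helpers can take just the key instead of a (bset_tree, key) pair. A sketch of the resulting call-site simplification (full definitions are in the hunks below; surrounding code elided):

	/* before: the bset_tree was needed only to test written-ness */
	if (bset_written(b, bset(b, t)))
		...

	/* after: the key's own address is enough */
	if (bkey_written(b, k))
		...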
@@ -20,12 +20,14 @@
 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
 {
+	unsigned offset = __btree_node_key_to_offset(b, k);
 	struct bset_tree *t;
 
 	for_each_bset(b, t)
-		if (k >= btree_bkey_first(b, t) &&
-		    k < btree_bkey_last(b, t))
+		if (offset <= t->end_offset) {
+			EBUG_ON(offset < btree_bkey_first_offset(t));
 			return t;
+		}
 
 	BUG();
 }
@@ -172,34 +174,29 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
 				 struct btree *b)
 {
-	struct btree_node_iter_set *set, *prev = NULL;
+	struct btree_node_iter_set *set, *s2;
 	struct bset_tree *t;
-	struct bkey_packed *k, *first;
 
-	if (bch2_btree_node_iter_end(iter))
-		return;
+	/* Verify no duplicates: */
+	btree_node_iter_for_each(iter, set)
+		btree_node_iter_for_each(iter, s2)
+			BUG_ON(set != s2 && set->end == s2->end);
 
+	/* Verify that set->end is correct: */
 	btree_node_iter_for_each(iter, set) {
-		k = __btree_node_offset_to_key(b, set->k);
-		t = bch2_bkey_to_bset(b, k);
-
-		BUG_ON(__btree_node_offset_to_key(b, set->end) !=
-		       btree_bkey_last(b, t));
-
-		BUG_ON(prev &&
-		       btree_node_iter_cmp(iter, b, *prev, *set) > 0);
-
-		prev = set;
+		for_each_bset(b, t)
+			if (set->end == t->end_offset)
+				goto found;
+		BUG();
+found:
+		BUG_ON(set->k < btree_bkey_first_offset(t) ||
+		       set->k >= t->end_offset);
 	}
 
-	first = __btree_node_offset_to_key(b, iter->data[0].k);
-
-	for_each_bset(b, t)
-		if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
-		    btree_bkey_last(b, t) &&
-		    (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
-			BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
-						     k, first) > 0);
+	/* Verify iterator is sorted: */
+	btree_node_iter_for_each(iter, set)
+		BUG_ON(set != iter->data &&
+		       btree_node_iter_cmp(iter, b, set[-1], set[0]) > 0);
 }
 
 void bch2_verify_key_order(struct btree *b,
...
@@ -309,7 +309,7 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
 	if (mode == COMPACT_LAZY) {
 		if (should_compact_bset_lazy(b, t) ||
-		    (compacting && bset_unwritten(b, bset(b, t))))
+		    (compacting && !bset_written(b, bset(b, t))))
 			return dead_u64s;
 	} else {
 		if (bset_written(b, bset(b, t)))
@@ -356,7 +356,7 @@ bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
 		struct bkey_packed *k, *n, *out, *start, *end;
 		struct btree_node_entry *src = NULL, *dst = NULL;
 
-		if (t != b->set && bset_unwritten(b, i)) {
+		if (t != b->set && !bset_written(b, i)) {
 			src = container_of(i, struct btree_node_entry, keys);
 			dst = max(write_block(b),
 				  (void *) btree_bkey_last(b, t - 1));
@@ -396,7 +396,7 @@ bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
 				continue;
 
 			if (bkey_whiteout(k)) {
-				unreserve_whiteout(b, t, k);
+				unreserve_whiteout(b, k);
 				memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
 				set_bkeyp_val_u64s(f, u_pos, 0);
 				u_pos = bkey_next(u_pos);
@@ -467,7 +467,7 @@ static bool bch2_drop_whiteouts(struct btree *b)
 		start	= btree_bkey_first(b, t);
 		end	= btree_bkey_last(b, t);
 
-		if (bset_unwritten(b, i) &&
+		if (!bset_written(b, i) &&
 		    t != b->set) {
 			struct bset *dst =
 				max_t(struct bset *, write_block(b),
@@ -829,7 +829,7 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b,
 	for (unwritten_idx = 0;
 	     unwritten_idx < b->nsets;
 	     unwritten_idx++)
-		if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
+		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
 			break;
 
 	if (b->nsets - unwritten_idx > 1) {
@@ -852,7 +852,7 @@ void bch2_btree_build_aux_trees(struct btree *b)
 	for_each_bset(b, t)
 		bch2_bset_build_aux_tree(b, t,
-				bset_unwritten(b, bset(b, t)) &&
+				!bset_written(b, bset(b, t)) &&
 				t == bset_tree_last(b));
 }
@@ -1949,9 +1949,9 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
 	clear_btree_node_just_written(b);
 
 	/*
-	 * Note: immediately after write, bset_unwritten()/bset_written() don't
-	 * work - the amount of data we had to write after compaction might have
-	 * been smaller than the offset of the last bset.
+	 * Note: immediately after write, bset_written() doesn't work - the
+	 * amount of data we had to write after compaction might have been
+	 * smaller than the offset of the last bset.
 	 *
 	 * However, we know that all bsets have been written here, as long as
 	 * we're still holding the write lock:
...
@@ -340,10 +340,38 @@ static inline struct bset_tree *bset_tree_last(struct btree *b)
 	return b->set + b->nsets - 1;
 }
 
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+	return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+	u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+	return ret;
+}
+
 static inline struct bset *bset(const struct btree *b,
 				const struct bset_tree *t)
 {
-	return (void *) b->data + t->data_offset * sizeof(u64);
+	return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+	t->end_offset =
+		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+				  const struct bset *i)
+{
+	t->data_offset = __btree_node_ptr_to_offset(b, i);
+	set_btree_bset_end(b, t);
 }
 
 static inline struct bset *btree_bset_first(struct btree *b)
@@ -359,16 +387,13 @@ static inline struct bset *btree_bset_last(struct btree *b)
 static inline u16
 __btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
 {
-	size_t ret = (u64 *) k - (u64 *) b->data - 1;
-
-	EBUG_ON(ret > U16_MAX);
-	return ret;
+	return __btree_node_ptr_to_offset(b, k);
 }
 
 static inline struct bkey_packed *
 __btree_node_offset_to_key(const struct btree *b, u16 k)
 {
-	return (void *) ((u64 *) b->data + k + 1);
+	return __btree_node_offset_to_ptr(b, k);
 }
 
 static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
@@ -376,7 +401,13 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
 	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
 }
 
-#define btree_bkey_first(_b, _t)	(bset(_b, _t)->start)
+#define btree_bkey_first(_b, _t)					\
+({									\
+	EBUG_ON(bset(_b, _t)->start !=					\
+		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+									\
+	bset(_b, _t)->start;						\
+})
 
 #define btree_bkey_last(_b, _t)						\
 ({									\
@@ -386,23 +417,6 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
 	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
 })
 
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
-{
-	t->end_offset =
-		__btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
-	btree_bkey_last(b, t);
-}
-
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
-				  const struct bset *i)
-{
-	t->data_offset = (u64 *) i - (u64 *) b->data;
-
-	EBUG_ON(bset(b, t) != i);
-
-	set_btree_bset_end(b, t);
-}
-
 static inline unsigned bset_byte_offset(struct btree *b, void *i)
 {
 	return i - (void *) b->data;
...
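
The offset cleanup above replaces two open-coded copies of the pointer/offset arithmetic with a single inverse pair, __btree_node_offset_to_ptr() and __btree_node_ptr_to_offset(), the latter asserting the round trip (and catching u16 truncation) via EBUG_ON. A toy standalone illustration of the same encoding, with a bare u64 buffer standing in for the btree node - offset_to_ptr()/ptr_to_offset() are hypothetical names for this sketch only:

	#include <assert.h>
	#include <stdint.h>

	/* offsets count u64s, measured from data + 1, and must fit in a
	 * u16 - mirroring the helpers above */
	static void *offset_to_ptr(uint64_t *data, uint16_t offset)
	{
		return data + 1 + offset;
	}

	static uint16_t ptr_to_offset(uint64_t *data, void *p)
	{
		uint16_t ret = (uint64_t *) p - 1 - data;

		/* the round trip must be exact, as the EBUG_ON above checks */
		assert(offset_to_ptr(data, ret) == p);
		return ret;
	}

	int main(void)
	{
		uint64_t node[128];

		/* any u64-aligned pointer into the node survives the round trip: */
		assert(ptr_to_offset(node, &node[37]) == 36);
		assert(offset_to_ptr(node, 36) == &node[37]);
		return 0;
	}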
@@ -241,14 +241,19 @@ static inline void *write_block(struct btree *b)
 	return (void *) b->data + (b->written << 9);
 }
 
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+	return p < write_block(b);
+}
+
 static inline bool bset_written(struct btree *b, struct bset *i)
 {
-	return (void *) i < write_block(b);
+	return __btree_addr_written(b, i);
 }
 
-static inline bool bset_unwritten(struct btree *b, struct bset *i)
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
 {
-	return (void *) i > write_block(b);
+	return __btree_addr_written(b, k);
 }
 
 static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
@@ -307,10 +312,9 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
 	return NULL;
 }
 
-static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
-				      struct bkey_packed *k)
+static inline void unreserve_whiteout(struct btree *b, struct bkey_packed *k)
 {
-	if (bset_written(b, bset(b, t))) {
+	if (bkey_written(b, k)) {
 		EBUG_ON(b->uncompacted_whiteout_u64s <
 			bkeyp_key_u64s(&b->format, k));
 		b->uncompacted_whiteout_u64s -=
@@ -318,10 +322,9 @@ static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
 	}
 }
 
-static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
-				    struct bkey_packed *k)
+static inline void reserve_whiteout(struct btree *b, struct bkey_packed *k)
 {
-	if (bset_written(b, bset(b, t))) {
+	if (bkey_written(b, k)) {
 		BUG_ON(!k->needs_whiteout);
 		b->uncompacted_whiteout_u64s +=
 			bkeyp_key_u64s(&b->format, k);
...
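
Why testing the key instead of its bset is sound: keys live inside their containing bset, and node writes cover whole bsets, so a bset and every key within it land on the same side of write_block() (apart from the immediately-after-write window the comment in bch2_btree_post_write_cleanup() describes, where neither form is reliable). A hypothetical sanity check one could write - check_written_agrees() is not part of the patch:

	static inline void check_written_agrees(struct btree *b,
						struct bset_tree *t,
						struct bkey_packed *k)
	{
		/* a key and its containing bset must agree on written-ness */
		EBUG_ON(bkey_written(b, k) != bset_written(b, bset(b, t)));
	}

This equivalence is what lets reserve_whiteout()/unreserve_whiteout() drop their bset_tree argument; the call sites in the hunks below shrink accordingly.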
@@ -40,7 +40,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 
 	t = bch2_bkey_to_bset(b, k);
 
-	if (bset_unwritten(b, bset(b, t)) &&
+	if (!bkey_written(b, k) &&
 	    bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
 	    !bkey_whiteout(&insert->k)) {
 		k->type = insert->k.type;
@@ -76,7 +76,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 			   k->u64s, k->u64s);
 
 	if (bkey_whiteout(&insert->k)) {
-		reserve_whiteout(b, t, k);
+		reserve_whiteout(b, k);
 		return true;
 	} else {
 		k->needs_whiteout = false;
...
@@ -1429,7 +1429,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 		 * what k points to)
 		 */
 		bkey_reassemble(&split.k, k.s_c);
-		split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
+		split.k.k.needs_whiteout |= bkey_written(b, _k);
 
 		bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
 		BUG_ON(bkey_deleted(&split.k.k));
@@ -1499,9 +1499,9 @@ __bch2_delete_fixup_extent(struct extent_insert_state *s)
 			bch2_subtract_sectors(s, k.s_c,
 					      bkey_start_offset(k.k), k.k->size);
 			_k->type = KEY_TYPE_DISCARD;
-			reserve_whiteout(b, t, _k);
+			reserve_whiteout(b, _k);
 		} else if (k.k->needs_whiteout ||
-			   bset_written(b, bset(b, t))) {
+			   bkey_written(b, _k)) {
 			struct bkey_i discard = *insert;
 
 			discard.k.type = KEY_TYPE_DISCARD;
@@ -1573,13 +1573,13 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
 			break;
 
 		if (k.k->size &&
-		    (k.k->needs_whiteout || bset_written(b, bset(b, t))))
+		    (k.k->needs_whiteout || bkey_written(b, _k)))
 			insert->k.needs_whiteout = true;
 
 		if (overlap == BCH_EXTENT_OVERLAP_ALL &&
 		    bkey_whiteout(k.k) &&
 		    k.k->needs_whiteout) {
-			unreserve_whiteout(b, t, _k);
+			unreserve_whiteout(b, _k);
 			_k->needs_whiteout = false;
 		}
 squash:
...