Commit 4cf91b02 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Split out bpos_cmp() and bkey_cmp()

With snapshots, we're going to need to differentiate between comparisons
that should and shouldn't include the snapshot field. bpos_cmp() is now
the comparison function that does include the snapshot field, used by
core btree code; bkey_cmp() now compares only the inode and offset
fields.

Upper level filesystem code generally does _not_ want to compare against
the snapshot field - that code wants keys to compare as equal even when
one of them is in an ancestor snapshot.
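To make the split concrete, here is a minimal illustrative sketch (not
part of the commit; it assumes the struct bpos field names used in the
designated initializers below, and cmp_int() is the kernel-style
three-way comparison helper the new functions are built on):

	/* Two positions that differ only in the snapshot field: */
	struct bpos a = { .inode = 1, .offset = 8, .snapshot = 1 };
	struct bpos b = { .inode = 1, .offset = 8, .snapshot = 2 };

	BUG_ON(bpos_cmp(a, b) >= 0);	/* core btree code: a sorts before b */
	BUG_ON(bkey_cmp(a, b) != 0);	/* upper layers: same logical position */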
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 43d00243
@@ -1048,7 +1048,7 @@ int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
 			      high_word(f, r),
 			      b->nr_key_bits);
 
-	EBUG_ON(ret != bkey_cmp(bkey_unpack_pos(b, l),
+	EBUG_ON(ret != bpos_cmp(bkey_unpack_pos(b, l),
 				bkey_unpack_pos(b, r)));
 	return ret;
 }
@@ -1058,7 +1058,7 @@ int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
 					       const struct bkey_packed *l,
 					       const struct bpos *r)
 {
-	return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r);
+	return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
 }
 
 __pure __flatten
@@ -1079,7 +1079,7 @@ int bch2_bkey_cmp_packed(const struct btree *b,
 		r = (void*) &unpacked;
 	}
 
-	return bkey_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
+	return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
 }
 
 __pure __flatten
@@ -1090,7 +1090,7 @@ int __bch2_bkey_cmp_left_packed(const struct btree *b,
 	const struct bkey *l_unpacked;
 
 	return unlikely(l_unpacked = packed_to_bkey_c(l))
-		? bkey_cmp(l_unpacked->p, *r)
+		? bpos_cmp(l_unpacked->p, *r)
 		: __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
 }
@@ -148,29 +148,27 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
 	return bkey_cmp_left_packed(b, l, &r);
 }
 
 #if 1
+static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
+{
+	return cmp_int(l.inode,    r.inode) ?:
+	       cmp_int(l.offset,   r.offset) ?:
+	       cmp_int(l.snapshot, r.snapshot);
+}
+
 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 {
-	if (l.inode != r.inode)
-		return l.inode < r.inode ? -1 : 1;
-	if (l.offset != r.offset)
-		return l.offset < r.offset ? -1 : 1;
-	if (l.snapshot != r.snapshot)
-		return l.snapshot < r.snapshot ? -1 : 1;
-	return 0;
+	return cmp_int(l.inode,  r.inode) ?:
+	       cmp_int(l.offset, r.offset);
 }
 #else
 int bkey_cmp(struct bpos l, struct bpos r);
 #endif
 
 static inline struct bpos bpos_min(struct bpos l, struct bpos r)
 {
-	return bkey_cmp(l, r) < 0 ? l : r;
+	return bpos_cmp(l, r) < 0 ? l : r;
 }
 
 static inline struct bpos bpos_max(struct bpos l, struct bpos r)
 {
-	return bkey_cmp(l, r) > 0 ? l : r;
+	return bpos_cmp(l, r) > 0 ? l : r;
 }
 
 #define sbb(a, b, borrow)	\
@@ -198,7 +196,7 @@ static inline struct bpos bpos_sub(struct bpos a, struct bpos b)
 static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
 {
-	if (bkey_cmp(l, r) > 0)
+	if (bpos_cmp(l, r) > 0)
 		swap(l, r);
 
 	return bpos_sub(r, l);
@@ -138,10 +138,10 @@ const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
 {
-	if (bkey_cmp(k.k->p, b->data->min_key) < 0)
+	if (bpos_cmp(k.k->p, b->data->min_key) < 0)
 		return "key before start of btree node";
 
-	if (bkey_cmp(k.k->p, b->data->max_key) > 0)
+	if (bpos_cmp(k.k->p, b->data->max_key) > 0)
 		return "key past end of btree node";
 
 	return NULL;
@@ -165,9 +165,9 @@ void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
 {
-	if (!bkey_cmp(pos, POS_MIN))
+	if (!bpos_cmp(pos, POS_MIN))
 		pr_buf(out, "POS_MIN");
-	else if (!bkey_cmp(pos, POS_MAX))
+	else if (!bpos_cmp(pos, POS_MAX))
 		pr_buf(out, "POS_MAX");
 	else {
 		if (pos.inode == U64_MAX)
@@ -256,7 +256,7 @@ enum merge_result bch2_bkey_merge(struct bch_fs *c,
 	    !ops->key_merge ||
 	    l.k->type != r.k->type ||
 	    bversion_cmp(l.k->version, r.k->version) ||
-	    bkey_cmp(l.k->p, bkey_start_pos(r.k)))
+	    bpos_cmp(l.k->p, bkey_start_pos(r.k)))
 		return BCH_MERGE_NOMERGE;
 
 	ret = ops->key_merge(c, l, r);
@@ -81,13 +81,13 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 		n = bkey_unpack_key(b, _n);
 
-		if (bkey_cmp(bkey_start_pos(&n), k.k->p) < 0) {
+		if (bpos_cmp(n.p, k.k->p) < 0) {
 			printk(KERN_ERR "Key skipped backwards\n");
 			continue;
 		}
 
 		if (!bkey_deleted(k.k) &&
-		    !bkey_cmp(n.p, k.k->p))
+		    !bpos_cmp(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
 }
@@ -522,7 +522,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
 	goto start;
 	while (1) {
 		if (rw_aux_to_bkey(b, t, j) == k) {
-			BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
+			BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
 					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
@@ -1174,7 +1174,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
 	while (l + 1 != r) {
 		unsigned m = (l + r) >> 1;
 
-		if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+		if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
 			l = m;
 		else
 			r = m;
@@ -1306,7 +1306,7 @@ struct bkey_packed *__bch2_bset_search(struct btree *b,
	 * start and end - handle that here:
	 */
 
-	if (bkey_cmp(*search, t->max_key) > 0)
+	if (bpos_cmp(*search, t->max_key) > 0)
 		return btree_bkey_last(b, t);
 
 	return bset_search_tree(b, t, search, lossy_packed_search);
@@ -1456,7 +1456,7 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
 	struct bkey_packed *k[MAX_BSETS];
 	unsigned i;
 
-	EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
+	EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
 
 	bset_aux_tree_verify(b);
 
 	memset(iter, 0, sizeof(*iter));
@@ -378,7 +378,7 @@ static inline int bkey_cmp_p_or_unp(const struct btree *b,
 	EBUG_ON(r_packed && !bkey_packed(r_packed));
 
 	if (unlikely(!bkey_packed(l)))
-		return bkey_cmp(packed_to_bkey_c(l)->p, *r);
+		return bpos_cmp(packed_to_bkey_c(l)->p, *r);
 
 	if (likely(r_packed))
 		return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);
@@ -418,24 +418,6 @@ bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
 	return bch2_bkey_prev_filter(b, t, k, 1);
 }
 
-enum bch_extent_overlap {
-	BCH_EXTENT_OVERLAP_ALL		= 0,
-	BCH_EXTENT_OVERLAP_BACK		= 1,
-	BCH_EXTENT_OVERLAP_FRONT	= 2,
-	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
-};
-
-/* Returns how k overlaps with m */
-static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
-							   const struct bkey *m)
-{
-	int cmp1 = bkey_cmp(k->p, m->p) < 0;
-	int cmp2 = bkey_cmp(bkey_start_pos(k),
-			    bkey_start_pos(m)) > 0;
-
-	return (cmp1 << 1) + cmp2;
-}
-
 /* Btree key iteration */
 
 void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
@@ -821,9 +821,9 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 	EBUG_ON(b->c.btree_id != iter->btree_id);
 	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
-	EBUG_ON(bkey_cmp(b->data->max_key, k->k.p));
+	EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
 	EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-		bkey_cmp(b->data->min_key,
+		bpos_cmp(b->data->min_key,
 			 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
 
 	return b;
@@ -904,9 +904,9 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
 	EBUG_ON(b->c.btree_id != btree_id);
 	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
-	EBUG_ON(bkey_cmp(b->data->max_key, k->k.p));
+	EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
 	EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-		bkey_cmp(b->data->min_key,
+		bpos_cmp(b->data->min_key,
 			 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
out:
 	bch2_btree_cache_cannibalize_unlock(c);
@@ -1018,7 +1018,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 	if (sib != btree_prev_sib)
 		swap(n1, n2);
 
-	if (bkey_cmp(bkey_successor(n1->key.k.p),
+	if (bpos_cmp(bkey_successor(n1->key.k.p),
 		     n2->data->min_key)) {
 		char buf1[200], buf2[200];
@@ -81,7 +81,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 		bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
 	}
 
-	if (fsck_err_on(bkey_cmp(expected_start, bp->v.min_key), c,
+	if (fsck_err_on(bpos_cmp(expected_start, bp->v.min_key), c,
			"btree node with incorrect min_key at btree %s level %u:\n"
			" prev %s\n"
			" cur %s",
@@ -92,7 +92,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 	}
 
 	if (fsck_err_on(is_last &&
-			bkey_cmp(cur.k->k.p, node_end), c,
+			bpos_cmp(cur.k->k.p, node_end), c,
			"btree node with incorrect max_key at btree %s level %u:\n"
			" %s\n"
			" expected %s",
@@ -489,8 +489,8 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 	bkey_init(&prev.k->k);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bkey_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bkey_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
 
 		ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
				       &k, &max_stale, true);
@@ -581,13 +581,13 @@ static int bch2_gc_btree_init(struct bch_fs *c,
 		return 0;
 
 	six_lock_read(&b->c.lock, NULL, NULL);
-	if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c,
+	if (fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
			"btree root with incorrect min_key: %s",
			(bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
 		BUG();
 	}
 
-	if (fsck_err_on(bkey_cmp(b->data->max_key, POS_MAX), c,
+	if (fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c,
			"btree root with incorrect max_key: %s",
			(bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
 		BUG();
@@ -1448,7 +1448,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 			unsigned j;
 
 			for (j = 0; j < nr_new_nodes; j++)
-				if (!bkey_cmp(old_nodes[i]->key.k.p,
+				if (!bpos_cmp(old_nodes[i]->key.k.p,
					      new_nodes[j]->key.k.p))
					goto next;
@@ -45,13 +45,9 @@ static inline struct gc_pos gc_phase(enum gc_phase phase)
 static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
 {
-	if (l.phase != r.phase)
-		return l.phase < r.phase ? -1 : 1;
-	if (bkey_cmp(l.pos, r.pos))
-		return bkey_cmp(l.pos, r.pos);
-	if (l.level != r.level)
-		return l.level < r.level ? -1 : 1;
-	return 0;
+	return cmp_int(l.phase, r.phase) ?:
+	       bpos_cmp(l.pos, r.pos) ?:
+	       cmp_int(l.level, r.level);
 }
 
 static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
@@ -38,7 +38,7 @@ static void verify_no_dups(struct btree *b,
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
 
-		BUG_ON(bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
+		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
 	}
#endif
 }
@@ -631,14 +631,14 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			b->data->max_key = b->key.k.p;
 		}
 
-		btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
+		btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			     "incorrect min_key: got %s should be %s",
			     (bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
			     (bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
 	}
 
-	btree_err_on(bkey_cmp(bn->max_key, b->key.k.p),
+	btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
		     "incorrect max key %s",
		     (bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));
@@ -220,7 +220,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 {
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bkey_cmp(bn->min_key, POS_MIN) &&
+	    bpos_cmp(bn->min_key, POS_MIN) &&
 	    write)
 		bn->min_key = bkey_predecessor(bn->min_key);
@@ -229,7 +229,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bkey_cmp(bn->min_key, POS_MIN) &&
+	    bpos_cmp(bn->min_key, POS_MIN) &&
 	    !write)
 		bn->min_key = bkey_successor(bn->min_key);
 }
@@ -37,13 +37,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
 {
-	return bkey_cmp(iter->real_pos, b->data->min_key) < 0;
+	return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
 }
 
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
 {
-	return bkey_cmp(b->key.k.p, iter->real_pos) < 0;
+	return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
 }
 
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
@@ -293,7 +293,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		/* Must lock btree nodes in key order: */
 		if (btree_node_locked(linked, level) &&
-		    bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 btree_iter_type(linked))) <= 0) {
 			deadlock_iter = linked;
 			reason = 7;
@@ -1392,7 +1392,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	if (!b)
 		return NULL;
 
-	BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
+	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
 
 	iter->pos = iter->real_pos = b->key.k.p;
@@ -1429,7 +1429,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	if (!b)
 		return NULL;
 
-	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
+	if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
@@ -1461,7 +1461,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
 {
-	int cmp = bkey_cmp(new_pos, iter->real_pos);
+	int cmp = bpos_cmp(new_pos, iter->real_pos);
 	unsigned l = iter->level;
 
 	if (!cmp)
@@ -1505,7 +1505,7 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_p
 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
 	struct bpos pos = iter->k.p;
-	bool ret = bkey_cmp(pos, POS_MAX) != 0;
+	bool ret = bpos_cmp(pos, POS_MAX) != 0;
 
 	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 		pos = bkey_successor(pos);
@@ -1516,7 +1516,7 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
 	struct bpos pos = bkey_start_pos(&iter->k);
-	bool ret = bkey_cmp(pos, POS_MIN) != 0;
+	bool ret = bpos_cmp(pos, POS_MIN) != 0;
 
 	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 		pos = bkey_predecessor(pos);
@@ -1527,7 +1527,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
 {
 	struct bpos next_pos = iter->l[0].b->key.k.p;
-	bool ret = bkey_cmp(next_pos, POS_MAX) != 0;
+	bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
 
	/*
	 * Typically, we don't want to modify iter->pos here, since that
@@ -1545,7 +1545,7 @@ static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
 static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
 {
 	struct bpos next_pos = iter->l[0].b->data->min_key;
-	bool ret = bkey_cmp(next_pos, POS_MIN) != 0;
+	bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
 
 	if (ret)
 		btree_iter_set_search_pos(iter, bkey_predecessor(next_pos));
@@ -21,7 +21,7 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
 	const struct bkey_cached_key *key = arg->key;
 
 	return cmp_int(ck->key.btree_id, key->btree_id) ?:
-		bkey_cmp(ck->key.pos, key->pos);
+		bpos_cmp(ck->key.pos, key->pos);
 }
 
 static const struct rhashtable_params bch2_btree_key_cache_params = {
@@ -252,7 +252,7 @@ static int bkey_cached_check_fn(struct six_lock *lock, void *p)
 	const struct btree_iter *iter = p;
 
 	return ck->key.btree_id == iter->btree_id &&
-		!bkey_cmp(ck->key.pos, iter->pos) ? 0 : -1;
+		!bpos_cmp(ck->key.pos, iter->pos) ? 0 : -1;
 }
 
 __flatten
@@ -293,7 +293,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 		if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
				     bkey_cached_check_fn, iter, _THIS_IP_)) {
 			if (ck->key.btree_id != iter->btree_id ||
-			    bkey_cmp(ck->key.pos, iter->pos)) {
+			    bpos_cmp(ck->key.pos, iter->pos)) {
				goto retry;
 			}
@@ -303,7 +303,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 		}
 
 		if (ck->key.btree_id != iter->btree_id ||
-		    bkey_cmp(ck->key.pos, iter->pos)) {
+		    bpos_cmp(ck->key.pos, iter->pos)) {
 			six_unlock_type(&ck->c.lock, lock_want);
 			goto retry;
 		}
@@ -50,7 +50,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 			break;
 		bp = bkey_s_c_to_btree_ptr_v2(k);
 
-		if (bkey_cmp(next_node, bp.v->min_key)) {
+		if (bpos_cmp(next_node, bp.v->min_key)) {
 			bch2_dump_btree_node(c, b);
 			panic("expected next min_key %s got %s\n",
			      (bch2_bpos_to_text(&PBUF(buf1), next_node), buf1),
@@ -60,7 +60,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 		bch2_btree_node_iter_advance(&iter, b);
 
 		if (bch2_btree_node_iter_end(&iter)) {
-			if (bkey_cmp(k.k->p, b->key.k.p)) {
+			if (bpos_cmp(k.k->p, b->key.k.p)) {
				bch2_dump_btree_node(c, b);
				panic("expected end %s got %s\n",
				      (bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1),
@@ -26,7 +26,7 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
 {
 	return cmp_int(l->btree_id, r->btree_id) ?:
 		-cmp_int(l->level, r->level) ?:
-		bkey_cmp(l->k->k.p, r->k->k.p);
+		bpos_cmp(l->k->k.p, r->k->k.p);
 }
 
 static inline bool same_leaf_as_prev(struct btree_trans *trans,
@@ -70,8 +70,8 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 	EBUG_ON(btree_node_just_written(b));
 	EBUG_ON(bset_written(b, btree_bset_last(b)));
 	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
-	EBUG_ON(bkey_cmp(insert->k.p, b->data->min_key) < 0);
-	EBUG_ON(bkey_cmp(insert->k.p, b->data->max_key) > 0);
+	EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
+	EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
 	EBUG_ON(insert->k.u64s >
		bch_btree_keys_u64s_remaining(iter->trans->c, b));
 	EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
@@ -225,7 +225,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
 	BUG_ON(bch2_debug_check_bkeys &&
	       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type));
-	BUG_ON(bkey_cmp(i->k->k.p, i->iter->real_pos));
+	BUG_ON(bpos_cmp(i->k->k.p, i->iter->real_pos));
 	BUG_ON(i->level != i->iter->level);
 	BUG_ON(i->btree_id != i->iter->btree_id);
 }
@@ -273,7 +273,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
 	if (err)
 		return err;
 
-	if (!i->size || !bkey_cmp(POS_MAX, i->from))
+	if (!i->size || !bpos_cmp(POS_MAX, i->from))
 		return i->ret;
 
 	bch2_trans_init(&trans, i->c, 0, 0);
@@ -289,7 +289,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
	 * can't easily correctly restart a btree node traversal across
	 * all nodes, meh
	 */
-	i->from = bkey_cmp(POS_MAX, b->key.k.p)
+	i->from = bpos_cmp(POS_MAX, b->key.k.p)
		? bkey_successor(b->key.k.p)
		: b->key.k.p;
@@ -582,6 +582,24 @@ void bch2_ptr_swab(struct bkey_s);
 
 /* Generic extent code: */
 
+enum bch_extent_overlap {
+	BCH_EXTENT_OVERLAP_ALL		= 0,
+	BCH_EXTENT_OVERLAP_BACK		= 1,
+	BCH_EXTENT_OVERLAP_FRONT	= 2,
+	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
+};
+
+/* Returns how k overlaps with m */
+static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
+							   const struct bkey *m)
+{
+	int cmp1 = bkey_cmp(k->p, m->p) < 0;
+	int cmp2 = bkey_cmp(bkey_start_pos(k),
+			    bkey_start_pos(m)) > 0;
+
+	return (cmp1 << 1) + cmp2;
+}
+
 int bch2_cut_front_s(struct bpos, struct bkey_s);
 int bch2_cut_back_s(struct bpos, struct bkey_s);
@@ -48,14 +48,14 @@ static int __journal_key_cmp(enum btree_id l_btree_id,
 {
 	return (cmp_int(l_btree_id, r->btree_id) ?:
		cmp_int(l_level, r->level) ?:
-		bkey_cmp(l_pos, r->k->k.p));
+		bpos_cmp(l_pos, r->k->k.p));
 }
 
 static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
 {
 	return (cmp_int(l->btree_id, r->btree_id) ?:
		cmp_int(l->level, r->level) ?:
-		bkey_cmp(l->k->k.p, r->k->k.p));
+		bpos_cmp(l->k->k.p, r->k->k.p));
 }
 
 static size_t journal_key_search(struct journal_keys *journal_keys,
@@ -90,7 +90,7 @@ static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsign
 	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
-	     bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
+	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
 		iter->idx++;
 }
@@ -238,7 +238,7 @@ struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));
 
 		if (btree_k.k && journal_k.k) {
-			int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);
+			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);
 
 			if (!cmp)
				bch2_journal_iter_advance_btree(iter);
@@ -256,7 +256,7 @@ struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *
 		ret = iter->last == journal ? journal_k : btree_k;
 
 		if (iter->b &&
-		    bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
+		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
 			iter->journal.idx = iter->journal.keys->nr;
 			iter->last = none;
 			return bkey_s_c_null;
@@ -419,7 +419,7 @@ static int journal_sort_key_cmp(const void *_l, const void *_r)
 	return cmp_int(l->btree_id, r->btree_id) ?:
		cmp_int(l->level, r->level) ?:
-		bkey_cmp(l->k->k.p, r->k->k.p) ?:
+		bpos_cmp(l->k->k.p, r->k->k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
 }
@@ -490,7 +490,7 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
 		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id == src[1].btree_id &&
		       src[0].level == src[1].level &&
-		       !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
+		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
 			src++;
 
 		*dst++ = *src++;
@@ -581,7 +581,7 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
 	return cmp_int(r->level, l->level) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id, r->btree_id) ?:
-		bkey_cmp(l->k->k.p, r->k->k.p);
+		bpos_cmp(l->k->k.p, r->k->k.p);
 }
 
 static int bch2_journal_replay(struct bch_fs *c,