Commit 39fb2983 authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: Kill bkey_type_successor

Previously, BTREE_ID_INODES was special - inodes were indexed by the
inode field, which meant the offset field of struct bpos wasn't used,
which led to special cases in e.g. the btree iterator code.

Now, inodes in the inodes btree are indexed by the offset field.

Also: previously min_key was special for extents btrees, min_key for
extents would equal max_key for the previous node. Now, min_key =
bkey_successor() of the previous node, same as non extent btrees.

This means we can completely get rid of
btree_type_successor/predecessor.

Also make some improvements to the metadata IO validate/compat code.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b72633ae
......@@ -1160,7 +1160,8 @@ enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
bcachefs_metadata_version_new_versioning = 10,
bcachefs_metadata_version_bkey_renumber = 10,
bcachefs_metadata_version_max = 11,
bcachefs_metadata_version_inode_btree_change = 11,
bcachefs_metadata_version_max = 12,
};
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
......
......@@ -273,3 +273,59 @@ void bch2_bkey_renumber(enum btree_node_type btree_node_type,
break;
}
}
/*
 * Convert a single (possibly packed) bkey between the on-disk format of
 * @version / @big_endian and the current in-memory conventions.
 *
 * @write: nonzero when converting current -> old format for writing out;
 *         zero when converting old -> current format after reading.
 * @f:     key format of the containing btree node (used to unpack/repack
 *         packed keys); may be NULL-format-free callers pass journal keys
 *         unpacked.
 * @k:     the key to transform in place.
 */
void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
			unsigned version, unsigned big_endian,
			int write,
			struct bkey_format *f,
			struct bkey_packed *k)
{
	const struct bkey_ops *ops;
	struct bkey uk;
	struct bkey_s u;

	/* Fix byte order of the key's fields if it doesn't match the host. */
	if (big_endian != CPU_BIG_ENDIAN)
		bch2_bkey_swab_key(f, k);

	/* Old versions used a different numbering for key types. */
	if (version < bcachefs_metadata_version_bkey_renumber)
		bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);

	/*
	 * Before bcachefs_metadata_version_inode_btree_change, the inodes
	 * btree was indexed by p.inode; now it's indexed by p.offset — swap
	 * the two fields to convert.
	 */
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id == BTREE_ID_INODES) {
		if (!bkey_packed(k)) {
			/* Unpacked key: swap the fields directly. */
			struct bkey_i *u = packed_to_bkey(k);

			swap(u->k.p.inode, u->k.p.offset);
		} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
			   f->bits_per_field[BKEY_FIELD_OFFSET]) {
			/*
			 * Packed key: build a format with the INODE/OFFSET
			 * roles exchanged, unpack with one format, swap the
			 * fields, repack with the other. Which format is
			 * "in" vs "out" depends on the conversion direction.
			 */
			struct bkey_format tmp = *f, *in = f, *out = &tmp;

			swap(tmp.bits_per_field[BKEY_FIELD_INODE],
			     tmp.bits_per_field[BKEY_FIELD_OFFSET]);
			swap(tmp.field_offset[BKEY_FIELD_INODE],
			     tmp.field_offset[BKEY_FIELD_OFFSET]);

			if (!write)
				swap(in, out);

			uk = __bch2_bkey_unpack_key(in, k);
			swap(uk.p.inode, uk.p.offset);
			/* Repacking with a same-width format cannot fail. */
			BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
		}
	}

	/* Get an unpacked view of the key so we can reach the value. */
	if (!bkey_packed(k)) {
		u = bkey_i_to_s(packed_to_bkey(k));
	} else {
		uk = __bch2_bkey_unpack_key(f, k);
		u.k = &uk;
		u.v = bkeyp_val(f, k);
	}

	/* Fix byte order of the value, too. */
	if (big_endian != CPU_BIG_ENDIAN)
		bch2_bkey_swab_val(u);

	/* Finally, any per-key-type compat hook (e.g. btree_ptr_v2). */
	ops = &bch2_bkey_ops[k->type];

	if (ops->compat)
		ops->compat(btree_id, version, big_endian, write, u);
}
......@@ -33,6 +33,9 @@ struct bkey_ops {
bool (*key_normalize)(struct bch_fs *, struct bkey_s);
enum merge_result (*key_merge)(struct bch_fs *,
struct bkey_s, struct bkey_s);
void (*compat)(enum btree_id id, unsigned version,
unsigned big_endian, int write,
struct bkey_s);
};
const char *bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c);
......@@ -60,4 +63,20 @@ enum merge_result bch2_bkey_merge(struct bch_fs *,
void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
void __bch2_bkey_compat(unsigned, enum btree_id, unsigned, unsigned,
int, struct bkey_format *, struct bkey_packed *);
/*
 * Fast-path wrapper around __bch2_bkey_compat(): skip the conversion
 * entirely when the key is already in the current version and has the
 * host's byte order.
 */
static inline void bch2_bkey_compat(unsigned level, enum btree_id btree_id,
				    unsigned version, unsigned big_endian,
				    int write,
				    struct bkey_format *f,
				    struct bkey_packed *k)
{
	if (version >= bcachefs_metadata_version_current &&
	    big_endian == CPU_BIG_ENDIAN)
		return;

	__bch2_bkey_compat(level, btree_id, version,
			   big_endian, write, f, k);
}
#endif /* _BCACHEFS_BKEY_METHODS_H */
......@@ -924,8 +924,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
if (sib != btree_prev_sib)
swap(n1, n2);
BUG_ON(bkey_cmp(btree_type_successor(n1->c.btree_id,
n1->key.k.p),
BUG_ON(bkey_cmp(bkey_successor(n1->key.k.p),
n2->data->min_key));
}
......
......@@ -74,7 +74,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
struct range_level *l = &r->l[b->c.level];
struct bpos expected_min = bkey_cmp(l->min, l->max)
? btree_type_successor(b->c.btree_id, l->max)
? bkey_successor(l->max)
: l->max;
bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
......@@ -105,8 +105,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
if (bkey_cmp(b->data->max_key, POS_MAX))
l->min = l->max =
btree_type_successor(b->c.btree_id,
b->data->max_key);
bkey_successor(b->data->max_key);
}
}
......@@ -987,9 +986,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
n1->key.k.p = n1->data->max_key =
bkey_unpack_pos(n1, last);
n2->data->min_key =
btree_type_successor(iter->btree_id,
n1->data->max_key);
n2->data->min_key = bkey_successor(n1->data->max_key);
memcpy_u64s(vstruct_last(s1),
s2->start, u64s);
......
......@@ -709,83 +709,107 @@ out: \
static int validate_bset(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors,
unsigned *whiteout_u64s, int write,
bool have_retry)
int write, bool have_retry)
{
struct bkey_packed *k, *prev = NULL;
bool seen_non_whiteout = false;
unsigned version;
unsigned version = le16_to_cpu(i->version);
const char *err;
int ret = 0;
btree_err_on((version != BCH_BSET_VERSION_OLD &&
version < bcachefs_metadata_version_min) ||
version >= bcachefs_metadata_version_max,
BTREE_ERR_FATAL, c, b, i,
"unsupported bset version");
if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
BTREE_ERR_FIXABLE, c, b, i,
"bset past end of btree node")) {
i->u64s = 0;
return 0;
}
btree_err_on(b->written && !i->u64s,
BTREE_ERR_FIXABLE, c, b, i,
"empty bset");
if (!b->written) {
struct btree_node *bn =
container_of(i, struct btree_node, keys);
/* These indicate that we read the wrong btree node: */
btree_err_on(BTREE_NODE_ID(b->data) != b->c.btree_id,
btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect btree id");
btree_err_on(BTREE_NODE_LEVEL(b->data) != b->c.level,
btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect level");
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
u64 *p = (u64 *) &b->data->ptr;
u64 *p = (u64 *) &bn->ptr;
*p = swab64(*p);
bch2_bpos_swab(&b->data->min_key);
bch2_bpos_swab(&b->data->max_key);
}
if (!write)
compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
struct bch_btree_ptr_v2 *bp =
&bkey_i_to_btree_ptr_v2(&b->key)->v;
btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
BTREE_ERR_MUST_RETRY, c, b, NULL,
"incorrect min_key");
"incorrect min_key: got %llu:%llu should be %llu:%llu",
b->data->min_key.inode,
b->data->min_key.offset,
bp->min_key.inode,
bp->min_key.offset);
}
btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
btree_err_on(bkey_cmp(bn->max_key, b->key.k.p),
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect max key");
if (write)
compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
/* XXX: ideally we would be validating min_key too */
#if 0
/*
* not correct anymore, due to btree node write error
* handling
*
* need to add b->data->seq to btree keys and verify
* need to add bn->seq to btree keys and verify
* against that
*/
btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
b->data->ptr),
bn->ptr),
BTREE_ERR_FATAL, c, b, i,
"incorrect backpointer");
#endif
err = bch2_bkey_format_validate(&b->data->format);
err = bch2_bkey_format_validate(&bn->format);
btree_err_on(err,
BTREE_ERR_FATAL, c, b, i,
"invalid bkey format: %s", err);
}
version = le16_to_cpu(i->version);
btree_err_on((version != BCH_BSET_VERSION_OLD &&
version < bcachefs_metadata_version_min) ||
version >= bcachefs_metadata_version_max,
BTREE_ERR_FATAL, c, b, i,
"unsupported bset version");
if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
BTREE_ERR_FIXABLE, c, b, i,
"bset past end of btree node")) {
i->u64s = 0;
return 0;
compat_bformat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&bn->format);
}
fsck_err:
return ret;
}
btree_err_on(b->written && !i->u64s,
BTREE_ERR_FIXABLE, c, b, i,
"empty bset");
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned *whiteout_u64s,
int write, bool have_retry)
{
unsigned version = le16_to_cpu(i->version);
struct bkey_packed *k, *prev = NULL;
bool seen_non_whiteout = false;
int ret = 0;
if (!BSET_SEPARATE_WHITEOUTS(i)) {
seen_non_whiteout = true;
......@@ -814,18 +838,14 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
continue;
}
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
bch2_bkey_swab_key(&b->format, k);
if (!write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(btree_node_type(b), k, write);
/* XXX: validate k->u64s */
if (!write)
bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
u = __bkey_disassemble(b, k, &tmp);
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
bch2_bkey_swab_val(u);
invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
bch2_bkey_in_btree_node(b, u.s_c) ?:
(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
......@@ -842,9 +862,10 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
continue;
}
if (write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(btree_node_type(b), k, write);
if (write)
bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
/*
* with the separate whiteouts thing (used for extents), the
......@@ -875,8 +896,6 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
prev = k;
k = bkey_next_skip_noops(k, vstruct_last(i));
}
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
fsck_err:
return ret;
}
......@@ -944,8 +963,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry
set_btree_node_old_extent_overwrite(b);
sectors = vstruct_sectors(b->data, c->block_bits);
btree_node_set_format(b, b->data->format);
} else {
bne = write_block(b);
i = &bne->keys;
......@@ -969,11 +986,21 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry
sectors = vstruct_sectors(bne, c->block_bits);
}
ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
ret = validate_bset(c, b, i, sectors,
READ, have_retry);
if (ret)
goto fsck_err;
if (!b->written)
btree_node_set_format(b, b->data->format);
ret = validate_bset_keys(c, b, i, &whiteout_u64s,
READ, have_retry);
if (ret)
goto fsck_err;
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
b->written += sectors;
blacklisted = bch2_journal_seq_is_blacklisted(c,
......@@ -1416,7 +1443,8 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
return -1;
ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
ret = validate_bset(c, b, i, sectors, WRITE, false) ?:
validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false);
if (ret)
bch2_inconsistent_error(c);
......@@ -1566,8 +1594,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
validate_before_checksum = true;
/* validate_bset will be modifying: */
if (le16_to_cpu(i->version) <
bcachefs_metadata_version_bkey_renumber)
if (le16_to_cpu(i->version) < bcachefs_metadata_version_max)
validate_before_checksum = true;
/* if we're going to be encrypting, check metadata validity first: */
......
......@@ -2,6 +2,7 @@
#ifndef _BCACHEFS_BTREE_IO_H
#define _BCACHEFS_BTREE_IO_H
#include "bkey_methods.h"
#include "bset.h"
#include "btree_locking.h"
#include "extents.h"
......@@ -140,4 +141,50 @@ void bch2_btree_flush_all_writes(struct bch_fs *);
void bch2_btree_verify_flushed(struct bch_fs *);
ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *, char *);
/*
 * Convert a btree node's key format between old and current conventions:
 * before bcachefs_metadata_version_inode_btree_change the inodes btree was
 * indexed by the inode field, so the INODE and OFFSET field descriptors
 * must be exchanged. The swap is its own inverse, so the same code handles
 * both read and write.
 */
static inline void compat_bformat(unsigned level, enum btree_id btree_id,
				  unsigned version, unsigned big_endian,
				  int write, struct bkey_format *f)
{
	if (btree_id != BTREE_ID_INODES ||
	    version >= bcachefs_metadata_version_inode_btree_change)
		return;

	swap(f->bits_per_field[BKEY_FIELD_INODE],
	     f->bits_per_field[BKEY_FIELD_OFFSET]);
	swap(f->field_offset[BKEY_FIELD_INODE],
	     f->field_offset[BKEY_FIELD_OFFSET]);
}
static inline void compat_bpos(unsigned level, enum btree_id btree_id,
unsigned version, unsigned big_endian,
int write, struct bpos *p)
{
if (big_endian != CPU_BIG_ENDIAN)
bch2_bpos_swab(p);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_id == BTREE_ID_INODES)
swap(p->inode, p->offset);
}
/*
 * Convert a btree node header (min_key/max_key) between the on-disk
 * conventions of @version and the current ones.
 *
 * Old extent-btree nodes used min_key == previous node's max_key; current
 * code uses min_key == bkey_successor() of the previous node's max_key.
 * On write (current -> old) the predecessor is taken BEFORE compat_bpos()
 * reorders fields for the old format; on read (old -> current) the
 * successor is taken AFTER compat_bpos() has restored current field order
 * — so the successor/predecessor arithmetic always runs on a pos in the
 * current layout. POS_MIN is excluded since it has no predecessor.
 */
static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
				     unsigned version, unsigned big_endian,
				     int write,
				     struct btree_node *bn)
{
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bn->min_key, POS_MIN) &&
	    write)
		bn->min_key = bkey_predecessor(bn->min_key);

	compat_bpos(level, btree_id, version, big_endian, write, &bn->min_key);
	compat_bpos(level, btree_id, version, big_endian, write, &bn->max_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bn->min_key, POS_MIN) &&
	    !write)
		bn->min_key = bkey_successor(bn->min_key);
}
#endif /* _BCACHEFS_BTREE_IO_H */
......@@ -39,7 +39,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
struct btree *b)
{
return bkey_cmp(iter->pos, b->data->min_key) < 0;
return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
}
static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
......@@ -1284,10 +1284,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
if (btree_node_read_locked(iter, iter->level))
btree_node_unlock(iter, iter->level);
/* ick: */
iter->pos = iter->btree_id == BTREE_ID_INODES
? btree_type_successor(iter->btree_id, iter->pos)
: bkey_successor(iter->pos);
iter->pos = bkey_successor(iter->pos);
iter->level = iter->min_depth;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
......@@ -1395,8 +1392,8 @@ static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
iter->k.p = iter->pos = l->b->key.k.p;
ret = bkey_cmp(iter->pos, POS_MAX) != 0;
if (ret)
iter->k.p = iter->pos = btree_type_successor(iter->btree_id, iter->pos);
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
iter->k.p = iter->pos = bkey_successor(iter->pos);
btree_iter_pos_changed(iter, 1);
return ret;
......@@ -1412,8 +1409,12 @@ static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
ret = bkey_cmp(iter->pos, POS_MIN) != 0;
if (ret)
iter->k.p = iter->pos = btree_type_predecessor(iter->btree_id, iter->pos);
if (ret) {
iter->k.p = iter->pos = bkey_predecessor(iter->pos);
if (iter->flags & BTREE_ITER_IS_EXTENTS)
iter->k.p = iter->pos = bkey_predecessor(iter->pos);
}
btree_iter_pos_changed(iter, -1);
return ret;
......@@ -1500,7 +1501,9 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
return bkey_s_c_null;
bch2_btree_iter_set_pos(iter,
btree_type_successor(iter->btree_id, iter->k.p));
(iter->flags & BTREE_ITER_IS_EXTENTS)
? iter->k.p
: bkey_successor(iter->k.p));
return bch2_btree_iter_peek(iter);
}
......@@ -1553,7 +1556,9 @@ struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
if (k.k && bkey_deleted(k.k)) {
bch2_btree_iter_set_pos(iter,
btree_type_successor(iter->btree_id, iter->k.p));
(iter->flags & BTREE_ITER_IS_EXTENTS)
? iter->k.p
: bkey_successor(iter->k.p));
continue;
}
......@@ -1582,7 +1587,9 @@ struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
return bkey_s_c_null;
bch2_btree_iter_set_pos(iter,
btree_type_successor(iter->btree_id, iter->k.p));
(iter->flags & BTREE_ITER_IS_EXTENTS)
? iter->k.p
: bkey_successor(iter->k.p));
return bch2_btree_iter_peek_with_updates(iter);
}
......@@ -1749,7 +1756,9 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
return bkey_s_c_null;
bch2_btree_iter_set_pos(iter,
btree_type_successor(iter->btree_id, iter->k.p));
(iter->flags & BTREE_ITER_IS_EXTENTS)
? iter->k.p
: bkey_successor(iter->k.p));
return bch2_btree_iter_peek_slot(iter);
}
......
......@@ -172,32 +172,6 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void __bch2_btree_iter_set_pos(struct btree_iter *, struct bpos, bool);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
/*
 * Per-btree successor of a position: the inodes btree historically only
 * used the inode field (offset resets to 0), extents btrees use the pos
 * unchanged, and all other btrees advance by one key position.
 */
static inline struct bpos btree_type_successor(enum btree_id id,
					       struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		pos.inode++;
		pos.offset = 0;
		return pos;
	}

	return btree_node_type_is_extents(id) ? pos : bkey_successor(pos);
}
/*
 * Per-btree predecessor of a position: the inodes btree steps back one
 * inode (offset resets to 0); every other btree steps back one key
 * position.
 */
static inline struct bpos btree_type_predecessor(enum btree_id id,
						 struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		--pos.inode;
		pos.offset = 0;
		return pos;
	}

	return bkey_predecessor(pos);
}
static inline int __btree_iter_cmp(enum btree_id id,
struct bpos pos,
const struct btree_iter *r)
......
......@@ -1193,7 +1193,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
BUG_ON(!prev);
btree_set_max(n1, bkey_unpack_pos(n1, prev));
btree_set_min(n2, btree_type_successor(n1->c.btree_id, n1->key.k.p));
btree_set_min(n2, bkey_successor(n1->key.k.p));
set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
......
......@@ -58,8 +58,11 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
EBUG_ON(btree_node_just_written(b));
EBUG_ON(bset_written(b, btree_bset_last(b)));
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
bkey_cmp(insert->k.p, b->data->max_key) > 0);
EBUG_ON(bkey_cmp(b->data->min_key, POS_MIN) &&
bkey_cmp(bkey_start_pos(&insert->k),
bkey_predecessor(b->data->min_key)) < 0);
EBUG_ON(bkey_cmp(insert->k.p, b->data->min_key) < 0);
EBUG_ON(bkey_cmp(insert->k.p, b->data->max_key) > 0);
EBUG_ON(insert->k.u64s >
bch_btree_keys_u64s_remaining(iter->trans->c, b));
EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
......
......@@ -115,7 +115,9 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
b = iter->l[0].b;
node_iter = iter->l[0].iter;
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
BUG_ON(bkey_cmp(b->data->min_key, POS_MIN) &&
bkey_cmp(bkey_start_pos(&insert->k),
bkey_predecessor(b->data->min_key)) < 0);
*end = bpos_min(insert->k.p, b->key.k.p);
......
......@@ -9,6 +9,7 @@
#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
......@@ -213,6 +214,22 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
}
/*
 * Per-key-type compat hook for KEY_TYPE_btree_ptr_v2: convert the
 * min_key stored in the value between old and current on-disk
 * conventions, mirroring what compat_btree_node() does for the node
 * header (old extent btrees stored min_key == previous node's max_key,
 * so take the predecessor when writing old-format, the successor when
 * reading it). POS_MIN is excluded since it has no predecessor.
 */
void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	/* Field-order/endianness conversion first (level 0: it's a key pos). */
	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bkey_predecessor(bp.v->min_key)
			: bkey_successor(bp.v->min_key);
}
/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
......
......@@ -371,6 +371,8 @@ const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
int, struct bkey_s);
#define bch2_bkey_ops_btree_ptr (struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_invalid, \
......@@ -384,6 +386,7 @@ void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
.key_debugcheck = bch2_btree_ptr_debugcheck, \
.val_to_text = bch2_btree_ptr_to_text, \
.swab = bch2_ptr_swab, \
.compat = bch2_btree_ptr_v2_compat, \
}
/* KEY_TYPE_extent: */
......
......@@ -1038,12 +1038,12 @@ static int check_directory_structure(struct bch_fs *c,
if (!ret)
continue;
if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.inode), c,
if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.offset), c,
"unreachable directory found (inum %llu)",
k.k->p.inode)) {
k.k->p.offset)) {
bch2_trans_unlock(&trans);
ret = reattach_inode(c, lostfound_inode, k.k->p.inode);
ret = reattach_inode(c, lostfound_inode, k.k->p.offset);
if (ret) {
goto err;
}
......
......@@ -98,7 +98,7 @@ void bch2_inode_pack(struct bkey_inode_buf *packed,
unsigned bytes;
bkey_inode_init(&packed->inode.k_i);
packed->inode.k.p.inode = inode->bi_inum;
packed->inode.k.p.offset = inode->bi_inum;
packed->inode.v.bi_hash_seed = inode->bi_hash_seed;
packed->inode.v.bi_flags = cpu_to_le32(inode->bi_flags);
packed->inode.v.bi_mode = cpu_to_le16(inode->bi_mode);
......@@ -149,7 +149,7 @@ int bch2_inode_unpack(struct bkey_s_c_inode inode,
unsigned fieldnr = 0, field_bits;
int ret;
unpacked->bi_inum = inode.k->p.inode;
unpacked->bi_inum = inode.k->p.offset;
unpacked->bi_hash_seed = inode.v->bi_hash_seed;
unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags);
unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
......@@ -188,7 +188,7 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(inum, 0),
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(0, inum),
BTREE_ITER_SLOTS|flags);
if (IS_ERR(iter))
return iter;
......@@ -232,13 +232,13 @@ const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k)
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked unpacked;
if (k.k->p.offset)
return "nonzero offset";
if (k.k->p.inode)
return "nonzero k.p.inode";
if (bkey_val_bytes(k.k) < sizeof(struct bch_inode))
return "incorrect value size";
if (k.k->p.inode < BLOCKDEV_INODE_MAX)
if (k.k->p.offset < BLOCKDEV_INODE_MAX)
return "fs inode in blockdev range";
if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR)
......@@ -280,8 +280,8 @@ void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c,
const char *bch2_inode_generation_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
if (k.k->p.offset)
return "nonzero offset";
if (k.k->p.inode)
return "nonzero k.p.inode";
if (bkey_val_bytes(k.k) != sizeof(struct bch_inode_generation))
return "incorrect value size";
......@@ -383,9 +383,9 @@ int bch2_inode_create(struct btree_trans *trans,
if (IS_ERR(inode_p))
return PTR_ERR(inode_p);
again:
for_each_btree_key(trans, iter, BTREE_ID_INODES, POS(start, 0),
for_each_btree_key(trans, iter, BTREE_ID_INODES, POS(0, start),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode > max)
if (bkey_cmp(iter->pos, POS(0, max)) > 0)
break;
if (k.k->type != KEY_TYPE_inode)
......@@ -405,8 +405,8 @@ int bch2_inode_create(struct btree_trans *trans,
return -ENOSPC;
found_slot:
*hint = k.k->p.inode;
inode_u->bi_inum = k.k->p.inode;
*hint = k.k->p.offset;
inode_u->bi_inum = k.k->p.offset;
inode_u->bi_generation = bkey_generation(k);
bch2_inode_pack(inode_p, inode_u);
......@@ -443,7 +443,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(inode_nr, 0),
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(0, inode_nr),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
do {
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
......@@ -475,10 +475,10 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
if (!bi_generation) {
bkey_init(&delete.k);
delete.k.p.inode = inode_nr;
delete.k.p.offset = inode_nr;
} else {
bkey_inode_generation_init(&delete.k_i);
delete.k.p.inode = inode_nr;
delete.k.p.offset = inode_nr;
delete.v.bi_generation = cpu_to_le32(bi_generation);
}
......@@ -500,7 +500,7 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr,
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
POS(inode_nr, 0), BTREE_ITER_SLOTS);
POS(0, inode_nr), BTREE_ITER_SLOTS);
if (IS_ERR(iter))
return PTR_ERR(iter);
......
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
......@@ -137,7 +138,8 @@ static void journal_entry_null_range(void *start, void *end)
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
struct jset_entry *entry,
struct bkey_i *k, enum btree_node_type key_type,
unsigned level, enum btree_id btree_id,
struct bkey_i *k,
const char *type, int write)
{
void *next = vstruct_next(entry);
......@@ -170,16 +172,13 @@ static int journal_validate_key(struct bch_fs *c, struct jset *jset,
return 0;
}
if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN) {
bch2_bkey_swab_key(NULL, bkey_to_packed(k));
bch2_bkey_swab_val(bkey_i_to_s(k));
}
if (!write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
if (!write)
bch2_bkey_compat(level, btree_id, version,
JSET_BIG_ENDIAN(jset), write,
NULL, bkey_to_packed(k));
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
__btree_node_type(level, btree_id));
if (invalid) {
char buf[160];
......@@ -193,9 +192,10 @@ static int journal_validate_key(struct bch_fs *c, struct jset *jset,
return 0;
}
if (write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
if (write)
bch2_bkey_compat(level, btree_id, version,
JSET_BIG_ENDIAN(jset), write,
NULL, bkey_to_packed(k));
fsck_err:
return ret;
}
......@@ -208,10 +208,10 @@ static int journal_entry_validate_btree_keys(struct bch_fs *c,
struct bkey_i *k;
vstruct_for_each(entry, k) {
int ret = journal_validate_key(c, jset, entry, k,
__btree_node_type(entry->level,
entry->btree_id),
"key", write);
int ret = journal_validate_key(c, jset, entry,
entry->level,
entry->btree_id,
k, "key", write);
if (ret)
return ret;
}
......@@ -241,7 +241,7 @@ static int journal_entry_validate_btree_root(struct bch_fs *c,
return 0;
}
return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
"btree root", write);
fsck_err:
return ret;
......@@ -1017,8 +1017,7 @@ void bch2_journal_write(struct closure *cl)
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
validate_before_checksum = true;
if (le32_to_cpu(jset->version) <
bcachefs_metadata_version_bkey_renumber)
if (le32_to_cpu(jset->version) < bcachefs_metadata_version_max)
validate_before_checksum = true;
if (validate_before_checksum &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment