Commit 26609b61 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Make bkey types globally unique

This lets us get rid of a lot of extra switch statements: previously,
many code paths dispatched first on the btree node type and then on the
key type, so making key types globally unique is a nice cleanup across
the codebase.

Also improve the on-disk format versioning: there is now one common
version number for all on-disk data structures (superblock, btree
nodes, journal entries).
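
To illustrate the shape of the cleanup, a minimal standalone sketch (not
the kernel code; only the type numbers and the bkey_ops name are taken
from this patch) of replacing nested switches with a single table lookup:

#include <stdio.h>

/* With globally unique key types, one ops table indexed by key type
 * replaces dispatching on (btree node type, key type): */
enum key_type { KEY_TYPE_extent = 6, KEY_TYPE_inode = 8, KEY_TYPE_MAX = 15 };

struct bkey { enum key_type type; };

struct bkey_ops {
        const char *(*key_invalid)(const struct bkey *);
};

static const char *extent_invalid(const struct bkey *k) { return NULL; }
static const char *inode_invalid(const struct bkey *k)  { return NULL; }

static const struct bkey_ops bkey_ops[KEY_TYPE_MAX] = {
        [KEY_TYPE_extent] = { .key_invalid = extent_invalid },
        [KEY_TYPE_inode]  = { .key_invalid = inode_invalid },
};

int main(void)
{
        struct bkey k = { .type = KEY_TYPE_extent };
        const char *err = bkey_ops[k.type].key_invalid
                ? bkey_ops[k.type].key_invalid(&k)
                : "invalid type";

        printf("%s\n", err ? err : "valid");
        return 0;
}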
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 01a0108f
......@@ -24,9 +24,9 @@ static inline int acl_to_xattr_type(int type)
{
switch (type) {
case ACL_TYPE_ACCESS:
return BCH_XATTR_INDEX_POSIX_ACL_ACCESS;
return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
case ACL_TYPE_DEFAULT:
return BCH_XATTR_INDEX_POSIX_ACL_DEFAULT;
return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
default:
BUG();
}
......@@ -355,7 +355,7 @@ int bch2_acl_chmod(struct btree_trans *trans,
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
&inode->ei_str_hash, inode->v.i_ino,
&X_SEARCH(BCH_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
&X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
BTREE_ITER_INTENT);
if (IS_ERR(iter))
return PTR_ERR(iter) != -ENOENT ? PTR_ERR(iter) : 0;
......
......@@ -76,22 +76,15 @@ static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
if (k.k->p.inode >= c->sb.nr_devices ||
!c->devs[k.k->p.inode])
return "invalid device";
switch (k.k->type) {
case BCH_ALLOC: {
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
/* allow for unknown fields */
if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
return "incorrect value size";
break;
}
default:
return "invalid type";
}
/* allow for unknown fields */
if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
return "incorrect value size";
return NULL;
}
......@@ -99,14 +92,9 @@ const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
switch (k.k->type) {
case BCH_ALLOC: {
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
pr_buf(out, "gen %u", a.v->gen);
break;
}
}
pr_buf(out, "gen %u", a.v->gen);
}
static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
......@@ -158,7 +146,7 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
struct bucket *g;
const u8 *d;
if (k.k->type != BCH_ALLOC)
if (k.k->type != KEY_TYPE_alloc)
return;
a = bkey_s_c_to_alloc(k);
......
......@@ -11,7 +11,7 @@
const char *bch2_alloc_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_alloc_ops (struct bkey_ops) { \
#define bch2_bkey_ops_alloc (struct bkey_ops) { \
.key_invalid = bch2_alloc_invalid, \
.val_to_text = bch2_alloc_to_text, \
}
......
......@@ -923,7 +923,8 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
* as allocated out of @ob
*/
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
struct bkey_i_extent *e, unsigned sectors)
struct bkey_i *k, unsigned sectors)
{
struct open_bucket *ob;
unsigned i;
......@@ -935,13 +936,11 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
struct bch_extent_ptr tmp = ob->ptr;
EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
tmp.cached = bkey_extent_is_cached(&e->k) ||
(!ca->mi.durability && wp->type == BCH_DATA_USER);
tmp.cached = !ca->mi.durability &&
wp->type == BCH_DATA_USER;
tmp.offset += ca->mi.bucket_size - ob->sectors_free;
extent_ptr_append(e, tmp);
bch2_bkey_append_ptr(k, tmp);
BUG_ON(sectors > ob->sectors_free);
ob->sectors_free -= sectors;
......
......@@ -101,7 +101,7 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *,
struct closure *);
void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
struct bkey_i_extent *, unsigned);
struct bkey_i *, unsigned);
void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
void bch2_open_buckets_stop_dev(struct bch_fs *, struct bch_dev *,
......
......@@ -541,6 +541,7 @@ struct bch_fs {
__uuid_t uuid;
__uuid_t user_uuid;
u16 version;
u16 encoded_extent_max;
u8 nr_devices;
......
......@@ -307,15 +307,6 @@ static inline void bkey_init(struct bkey *k)
#define __BKEY_PADDED(key, pad) \
struct { struct bkey_i key; __u64 key ## _pad[pad]; }
#define BKEY_VAL_TYPE(name, nr) \
struct bkey_i_##name { \
union { \
struct bkey k; \
struct bkey_i k_i; \
}; \
struct bch_##name v; \
}
/*
* - DELETED keys are used internally to mark keys that should be ignored but
* override keys in composition order. Their version number is ignored.
......@@ -330,19 +321,37 @@ struct bkey_i_##name { \
* by new writes or cluster-wide GC. Node repair can also overwrite them with
* the same or a more recent version number, but not with an older version
* number.
*
* - WHITEOUT: for hash table btrees
*/
#define KEY_TYPE_DELETED 0
#define KEY_TYPE_DISCARD 1
#define KEY_TYPE_ERROR 2
#define KEY_TYPE_COOKIE 3
#define KEY_TYPE_PERSISTENT_DISCARD 4
#define KEY_TYPE_GENERIC_NR 128
#define BCH_BKEY_TYPES() \
x(deleted, 0) \
x(discard, 1) \
x(error, 2) \
x(cookie, 3) \
x(whiteout, 4) \
x(btree_ptr, 5) \
x(extent, 6) \
x(reservation, 7) \
x(inode, 8) \
x(inode_generation, 9) \
x(dirent, 10) \
x(xattr, 11) \
x(alloc, 12) \
x(quota, 13) \
x(stripe, 14)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
BCH_BKEY_TYPES()
#undef x
KEY_TYPE_MAX,
};
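The list above is expanded several times in this patch: once for the
enum here, and again later for the bch_bkey_types[] name table and the
bch2_bkey_ops[] dispatch table. A standalone sketch of the x-macro
pattern, assuming the dense in-order numbering BCH_BKEY_TYPES() has:

#include <stdio.h>

#define TYPES() x(deleted, 0) x(discard, 1) x(error, 2)

enum key_type {
#define x(name, nr) KEY_TYPE_##name = nr,
        TYPES()
#undef x
        KEY_TYPE_MAX,
};

/* Relies on the numbers being dense and in order, as in BCH_BKEY_TYPES(): */
static const char * const type_names[] = {
#define x(name, nr) #name,
        TYPES()
#undef x
        NULL
};

int main(void)
{
        printf("%d %s\n", KEY_TYPE_error, type_names[KEY_TYPE_error]);
        return 0;
}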
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
/* Extents */
......@@ -620,21 +629,12 @@ union bch_extent_entry {
#undef x
};
enum {
BCH_EXTENT = 128,
/*
* This is kind of a hack, we're overloading the type for a boolean that
* really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
* have the same value type:
*/
BCH_EXTENT_CACHED = 129,
struct bch_btree_ptr {
struct bch_val v;
/*
* Persistent reservation:
*/
BCH_RESERVATION = 130,
};
__u64 _data[0];
struct bch_extent_ptr start[];
} __attribute__((packed, aligned(8)));
struct bch_extent {
struct bch_val v;
......@@ -642,7 +642,6 @@ struct bch_extent {
__u64 _data[0];
union bch_extent_entry start[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);
struct bch_reservation {
struct bch_val v;
......@@ -651,7 +650,6 @@ struct bch_reservation {
__u8 nr_replicas;
__u8 pad[3];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
......@@ -679,12 +677,6 @@ BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
#define BCACHEFS_ROOT_INO 4096
enum bch_inode_types {
BCH_INODE_FS = 128,
BCH_INODE_BLOCKDEV = 129,
BCH_INODE_GENERATION = 130,
};
struct bch_inode {
struct bch_val v;
......@@ -693,7 +685,6 @@ struct bch_inode {
__le16 bi_mode;
__u8 fields[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);
struct bch_inode_generation {
struct bch_val v;
......@@ -701,7 +692,6 @@ struct bch_inode_generation {
__le32 bi_generation;
__le32 pad;
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_generation, BCH_INODE_GENERATION);
#define BCH_INODE_FIELDS() \
BCH_INODE_FIELD(bi_atime, 64) \
......@@ -766,24 +756,6 @@ enum {
LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 32);
struct bch_inode_blockdev {
struct bch_val v;
__le64 i_size;
__le64 i_flags;
/* Seconds: */
__le64 i_ctime;
__le64 i_mtime;
__uuid_t i_uuid;
__u8 i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);
/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1)
/* Dirents */
/*
......@@ -797,11 +769,6 @@ LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1)
* collision:
*/
enum {
BCH_DIRENT = 128,
BCH_DIRENT_WHITEOUT = 129,
};
struct bch_dirent {
struct bch_val v;
......@@ -816,7 +783,6 @@ struct bch_dirent {
__u8 d_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);
#define BCH_NAME_MAX (U8_MAX * sizeof(u64) - \
sizeof(struct bkey) - \
......@@ -825,16 +791,11 @@ BKEY_VAL_TYPE(dirent, BCH_DIRENT);
/* Xattrs */
enum {
BCH_XATTR = 128,
BCH_XATTR_WHITEOUT = 129,
};
#define BCH_XATTR_INDEX_USER 0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS 1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT 2
#define BCH_XATTR_INDEX_TRUSTED 3
#define BCH_XATTR_INDEX_SECURITY 4
#define KEY_TYPE_XATTR_INDEX_USER 0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
#define KEY_TYPE_XATTR_INDEX_SECURITY 4
struct bch_xattr {
struct bch_val v;
......@@ -843,14 +804,9 @@ struct bch_xattr {
__le16 x_val_len;
__u8 x_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(xattr, BCH_XATTR);
/* Bucket/allocation information: */
enum {
BCH_ALLOC = 128,
};
enum {
BCH_ALLOC_FIELD_READ_TIME = 0,
BCH_ALLOC_FIELD_WRITE_TIME = 1,
......@@ -862,14 +818,9 @@ struct bch_alloc {
__u8 gen;
__u8 data[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(alloc, BCH_ALLOC);
/* Quotas: */
enum {
BCH_QUOTA = 128,
};
enum quota_types {
QTYP_USR = 0,
QTYP_GRP = 1,
......@@ -892,14 +843,9 @@ struct bch_quota {
struct bch_val v;
struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(quota, BCH_QUOTA);
/* Erasure coding */
enum {
BCH_STRIPE = 128,
};
struct bch_stripe {
struct bch_val v;
__le16 sectors;
......@@ -913,7 +859,6 @@ struct bch_stripe {
struct bch_extent_ptr ptrs[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(stripe, BCH_STRIPE);
/* Optional/variable size superblock sections: */
......@@ -1149,15 +1094,21 @@ struct bch_sb_field_clean {
/* Superblock: */
/*
* Version 8: BCH_SB_ENCODED_EXTENT_MAX_BITS
* BCH_MEMBER_DATA_ALLOWED
* Version 9: incompatible extent nonce change
* New versioning scheme:
* One common version number for all on disk data structures - superblock, btree
* nodes, journal entries
*/
#define BCH_JSET_VERSION_OLD 2
#define BCH_BSET_VERSION_OLD 3
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
bcachefs_metadata_version_new_versioning = 10,
bcachefs_metadata_version_bkey_renumber = 10,
bcachefs_metadata_version_max = 11,
};
#define BCH_SB_VERSION_MIN 7
#define BCH_SB_VERSION_EXTENT_MAX 8
#define BCH_SB_VERSION_EXTENT_NONCE_V1 9
#define BCH_SB_VERSION_MAX 9
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
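A sketch of the bset version check this scheme enables, mirroring the
btree_err_on() condition added to validate_bset() further down
(illustrative helper using the definitions above, not part of the patch):

/* Accept either the fixed legacy bset version or anything in
 * [version_min, version_max): */
static int bset_version_ok(unsigned version)
{
        return version == BCH_BSET_VERSION_OLD ||
                (version >= bcachefs_metadata_version_min &&
                 version <  bcachefs_metadata_version_max);
}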
#define BCH_SB_SECTOR 8
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
......@@ -1176,6 +1127,9 @@ struct bch_sb_layout {
/*
* @offset - sector where this sb was written
* @version - on disk format version
* @version_min - Oldest metadata version this filesystem contains; so we can
* safely drop compatibility code and refuse to mount filesystems
* we'd need it for
* @magic - identifies as a bcachefs superblock (BCACHE_MAGIC)
* @seq - incremented each time superblock is written
* @uuid - used for generating various magic numbers and identifying
......@@ -1369,11 +1323,6 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
/* Journal */
#define BCACHE_JSET_VERSION_UUIDv1 1
#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS 2
#define BCACHE_JSET_VERSION 2
#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
#define BCH_JSET_ENTRY_TYPES() \
......@@ -1453,35 +1402,26 @@ LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
/* Btree: */
#define DEFINE_BCH_BTREE_IDS() \
DEF_BTREE_ID(EXTENTS, 0, "extents") \
DEF_BTREE_ID(INODES, 1, "inodes") \
DEF_BTREE_ID(DIRENTS, 2, "dirents") \
DEF_BTREE_ID(XATTRS, 3, "xattrs") \
DEF_BTREE_ID(ALLOC, 4, "alloc") \
DEF_BTREE_ID(QUOTAS, 5, "quotas") \
DEF_BTREE_ID(EC, 6, "erasure_coding")
#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
#define BCH_BTREE_IDS() \
x(EXTENTS, 0, "extents") \
x(INODES, 1, "inodes") \
x(DIRENTS, 2, "dirents") \
x(XATTRS, 3, "xattrs") \
x(ALLOC, 4, "alloc") \
x(QUOTAS, 5, "quotas") \
x(EC, 6, "erasure_coding")
enum btree_id {
DEFINE_BCH_BTREE_IDS()
#define x(kwd, val, name) BTREE_ID_##kwd = val,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
};
#undef DEF_BTREE_ID
#define BTREE_MAX_DEPTH 4U
/* Btree nodes */
/* Version 1: Seed pointer into btree node checksum
*/
#define BCACHE_BSET_CSUM 1
#define BCACHE_BSET_KEY_v1 2
#define BCACHE_BSET_JOURNAL_SEQ 3
#define BCACHE_BSET_VERSION 3
/*
* Btree nodes
*
......
......@@ -488,7 +488,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
pack_state_finish(&state, out);
out->u64s = f->key_u64s;
out->format = KEY_FORMAT_LOCAL_BTREE;
out->type = KEY_TYPE_DELETED;
out->type = KEY_TYPE_deleted;
#ifdef CONFIG_BCACHEFS_DEBUG
if (exact) {
......
......@@ -61,10 +61,12 @@ static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
k->u64s = BKEY_U64s + DIV_ROUND_UP(bytes, sizeof(u64));
}
#define bkey_deleted(_k) ((_k)->type == KEY_TYPE_DELETED)
#define bkey_val_end(_k) vstruct_idx((_k).v, bkey_val_u64s((_k).k))
#define bkey_deleted(_k) ((_k)->type == KEY_TYPE_deleted)
#define bkey_whiteout(_k) \
((_k)->type == KEY_TYPE_DELETED || (_k)->type == KEY_TYPE_DISCARD)
((_k)->type == KEY_TYPE_deleted || (_k)->type == KEY_TYPE_discard)
#define bkey_packed_typecheck(_k) \
({ \
......@@ -439,7 +441,15 @@ static inline struct bkey_s_c bkey_i_to_s_c(const struct bkey_i *k)
* bkey_i_extent to a bkey_i - since that's always safe, instead of conversion
* functions.
*/
#define __BKEY_VAL_ACCESSORS(name, nr, _assert) \
#define BKEY_VAL_ACCESSORS(name) \
struct bkey_i_##name { \
union { \
struct bkey k; \
struct bkey_i k_i; \
}; \
struct bch_##name v; \
}; \
\
struct bkey_s_c_##name { \
union { \
struct { \
......@@ -464,20 +474,20 @@ struct bkey_s_##name { \
\
static inline struct bkey_i_##name *bkey_i_to_##name(struct bkey_i *k) \
{ \
_assert(k->k.type, nr); \
EBUG_ON(k->k.type != KEY_TYPE_##name); \
return container_of(&k->k, struct bkey_i_##name, k); \
} \
\
static inline const struct bkey_i_##name * \
bkey_i_to_##name##_c(const struct bkey_i *k) \
{ \
_assert(k->k.type, nr); \
EBUG_ON(k->k.type != KEY_TYPE_##name); \
return container_of(&k->k, struct bkey_i_##name, k); \
} \
\
static inline struct bkey_s_##name bkey_s_to_##name(struct bkey_s k) \
{ \
_assert(k.k->type, nr); \
EBUG_ON(k.k->type != KEY_TYPE_##name); \
return (struct bkey_s_##name) { \
.k = k.k, \
.v = container_of(k.v, struct bch_##name, v), \
......@@ -486,7 +496,7 @@ static inline struct bkey_s_##name bkey_s_to_##name(struct bkey_s k) \
\
static inline struct bkey_s_c_##name bkey_s_c_to_##name(struct bkey_s_c k)\
{ \
_assert(k.k->type, nr); \
EBUG_ON(k.k->type != KEY_TYPE_##name); \
return (struct bkey_s_c_##name) { \
.k = k.k, \
.v = container_of(k.v, struct bch_##name, v), \
......@@ -512,7 +522,7 @@ name##_i_to_s_c(const struct bkey_i_##name *k) \
\
static inline struct bkey_s_##name bkey_i_to_s_##name(struct bkey_i *k) \
{ \
_assert(k->k.type, nr); \
EBUG_ON(k->k.type != KEY_TYPE_##name); \
return (struct bkey_s_##name) { \
.k = &k->k, \
.v = container_of(&k->v, struct bch_##name, v), \
......@@ -522,27 +532,13 @@ static inline struct bkey_s_##name bkey_i_to_s_##name(struct bkey_i *k) \
static inline struct bkey_s_c_##name \
bkey_i_to_s_c_##name(const struct bkey_i *k) \
{ \
_assert(k->k.type, nr); \
EBUG_ON(k->k.type != KEY_TYPE_##name); \
return (struct bkey_s_c_##name) { \
.k = &k->k, \
.v = container_of(&k->v, struct bch_##name, v), \
}; \
} \
\
static inline struct bch_##name * \
bkey_p_##name##_val(const struct bkey_format *f, \
struct bkey_packed *k) \
{ \
return container_of(bkeyp_val(f, k), struct bch_##name, v); \
} \
\
static inline const struct bch_##name * \
bkey_p_c_##name##_val(const struct bkey_format *f, \
const struct bkey_packed *k) \
{ \
return container_of(bkeyp_val(f, k), struct bch_##name, v); \
} \
\
static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
{ \
struct bkey_i_##name *k = \
......@@ -550,45 +546,23 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
\
bkey_init(&k->k); \
memset(&k->v, 0, sizeof(k->v)); \
k->k.type = nr; \
k->k.type = KEY_TYPE_##name; \
set_bkey_val_bytes(&k->k, sizeof(k->v)); \
\
return k; \
}
#define __BKEY_VAL_ASSERT(_type, _nr) EBUG_ON(_type != _nr)
#define BKEY_VAL_ACCESSORS(name, _nr) \
static inline void __bch_##name##_assert(u8 type, u8 nr) \
{ \
EBUG_ON(type != _nr); \
} \
\
__BKEY_VAL_ACCESSORS(name, _nr, __bch_##name##_assert)
BKEY_VAL_ACCESSORS(cookie, KEY_TYPE_COOKIE);
static inline void __bch2_extent_assert(u8 type, u8 nr)
{
EBUG_ON(type != BCH_EXTENT && type != BCH_EXTENT_CACHED);
}
__BKEY_VAL_ACCESSORS(extent, BCH_EXTENT, __bch2_extent_assert);
BKEY_VAL_ACCESSORS(reservation, BCH_RESERVATION);
BKEY_VAL_ACCESSORS(inode, BCH_INODE_FS);
BKEY_VAL_ACCESSORS(inode_blockdev, BCH_INODE_BLOCKDEV);
BKEY_VAL_ACCESSORS(inode_generation, BCH_INODE_GENERATION);
BKEY_VAL_ACCESSORS(dirent, BCH_DIRENT);
BKEY_VAL_ACCESSORS(xattr, BCH_XATTR);
BKEY_VAL_ACCESSORS(alloc, BCH_ALLOC);
BKEY_VAL_ACCESSORS(quota, BCH_QUOTA);
BKEY_VAL_ACCESSORS(stripe, BCH_STRIPE);
BKEY_VAL_ACCESSORS(cookie);
BKEY_VAL_ACCESSORS(btree_ptr);
BKEY_VAL_ACCESSORS(extent);
BKEY_VAL_ACCESSORS(reservation);
BKEY_VAL_ACCESSORS(inode);
BKEY_VAL_ACCESSORS(inode_generation);
BKEY_VAL_ACCESSORS(dirent);
BKEY_VAL_ACCESSORS(xattr);
BKEY_VAL_ACCESSORS(alloc);
BKEY_VAL_ACCESSORS(quota);
BKEY_VAL_ACCESSORS(stripe);
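For reference, roughly what BKEY_VAL_ACCESSORS(alloc) expands to for the
bkey_i case (abridged; the real macro above also generates the _s and
_s_c variants and the init helper):

struct bkey_i_alloc {
        union {
                struct bkey   k;
                struct bkey_i k_i;
        };
        struct bch_alloc v;
};

/* Checked downcast: debug builds assert the key really is an alloc key,
 * then the key is reinterpreted as the typed wrapper: */
static inline struct bkey_i_alloc *bkey_i_to_alloc(struct bkey_i *k)
{
        EBUG_ON(k->k.type != KEY_TYPE_alloc);
        return container_of(&k->k, struct bkey_i_alloc, k);
}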
/* byte order helpers */
......
......@@ -12,66 +12,84 @@
#include "quota.h"
#include "xattr.h"
const struct bkey_ops bch2_bkey_ops[] = {
[BKEY_TYPE_EXTENTS] = bch2_bkey_extent_ops,
[BKEY_TYPE_INODES] = bch2_bkey_inode_ops,
[BKEY_TYPE_DIRENTS] = bch2_bkey_dirent_ops,
[BKEY_TYPE_XATTRS] = bch2_bkey_xattr_ops,
[BKEY_TYPE_ALLOC] = bch2_bkey_alloc_ops,
[BKEY_TYPE_QUOTAS] = bch2_bkey_quota_ops,
[BKEY_TYPE_EC] = bch2_bkey_ec_ops,
[BKEY_TYPE_BTREE] = bch2_bkey_btree_ops,
const char * const bch_bkey_types[] = {
#define x(name, nr) #name,
BCH_BKEY_TYPES()
#undef x
NULL
};
const char *bch2_bkey_val_invalid(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
static const char *deleted_key_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[type];
return NULL;
}
const struct bkey_ops bch2_bkey_ops_deleted = {
.key_invalid = deleted_key_invalid,
};
const struct bkey_ops bch2_bkey_ops_discard = {
.key_invalid = deleted_key_invalid,
};
switch (k.k->type) {
case KEY_TYPE_DELETED:
case KEY_TYPE_DISCARD:
return NULL;
static const char *empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
if (bkey_val_bytes(k.k))
return "value size should be zero";
case KEY_TYPE_ERROR:
return bkey_val_bytes(k.k) != 0
? "value size should be zero"
: NULL;
return NULL;
}
case KEY_TYPE_COOKIE:
return bkey_val_bytes(k.k) != sizeof(struct bch_cookie)
? "incorrect value size"
: NULL;
const struct bkey_ops bch2_bkey_ops_error = {
.key_invalid = empty_val_key_invalid,
};
default:
if (k.k->type < KEY_TYPE_GENERIC_NR)
return "invalid type";
static const char *key_type_cookie_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie))
return "incorrect value size";
return ops->key_invalid(c, k);
}
return NULL;
}
const char *__bch2_bkey_invalid(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
const struct bkey_ops bch2_bkey_ops_cookie = {
.key_invalid = key_type_cookie_invalid,
};
const struct bkey_ops bch2_bkey_ops_whiteout = {
.key_invalid = empty_val_key_invalid,
};
static const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
BCH_BKEY_TYPES()
#undef x
};
const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[type];
if (k.k->type >= KEY_TYPE_MAX)
return "invalid type";
return bch2_bkey_ops[k.k->type].key_invalid(c, k);
}
const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type)
{
if (k.k->u64s < BKEY_U64s)
return "u64s too small";
if (!ops->is_extents) {
if (k.k->size)
return "nonzero size field";
} else {
if (btree_node_type_is_extents(type)) {
if ((k.k->size == 0) != bkey_deleted(k.k))
return "bad size field";
} else {
if (k.k->size)
return "nonzero size field";
}
if (ops->is_extents &&
!k.k->size &&
!bkey_deleted(k.k))
return "zero size field";
if (k.k->p.snapshot)
return "nonzero snapshot";
......@@ -82,11 +100,11 @@ const char *__bch2_bkey_invalid(struct bch_fs *c, enum bkey_type type,
return NULL;
}
const char *bch2_bkey_invalid(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type)
{
return __bch2_bkey_invalid(c, type, k) ?:
bch2_bkey_val_invalid(c, type, k);
return __bch2_bkey_invalid(c, k, type) ?:
bch2_bkey_val_invalid(c, k);
}
const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
......@@ -102,24 +120,22 @@ const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
{
enum bkey_type type = btree_node_type(b);
const struct bkey_ops *ops = &bch2_bkey_ops[type];
const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
const char *invalid;
BUG_ON(!k.k->u64s);
invalid = bch2_bkey_invalid(c, type, k) ?:
invalid = bch2_bkey_invalid(c, k, btree_node_type(b)) ?:
bch2_bkey_in_btree_node(b, k);
if (invalid) {
char buf[160];
bch2_bkey_val_to_text(&PBUF(buf), c, type, k);
bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_bug(c, "invalid bkey %s: %s", buf, invalid);
return;
}
if (k.k->type >= KEY_TYPE_GENERIC_NR &&
ops->key_debugcheck)
if (ops->key_debugcheck)
ops->key_debugcheck(c, b, k);
}
......@@ -144,46 +160,90 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
}
void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
enum bkey_type type, struct bkey_s_c k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[type];
switch (k.k->type) {
case KEY_TYPE_DELETED:
pr_buf(out, " deleted");
break;
case KEY_TYPE_DISCARD:
pr_buf(out, " discard");
break;
case KEY_TYPE_ERROR:
pr_buf(out, " error");
break;
case KEY_TYPE_COOKIE:
pr_buf(out, " cookie");
break;
default:
if (k.k->type >= KEY_TYPE_GENERIC_NR && ops->val_to_text)
ops->val_to_text(out, c, k);
break;
}
struct bkey_s_c k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
if (likely(ops->val_to_text))
ops->val_to_text(out, c, k);
else
pr_buf(out, " %s", bch_bkey_types[k.k->type]);
}
void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
enum bkey_type type, struct bkey_s_c k)
struct bkey_s_c k)
{
bch2_bkey_to_text(out, k.k);
pr_buf(out, ": ");
bch2_val_to_text(out, c, type, k);
bch2_val_to_text(out, c, k);
}
void bch2_bkey_swab(enum bkey_type type,
const struct bkey_format *f,
struct bkey_packed *k)
void bch2_bkey_swab(const struct bkey_format *f,
struct bkey_packed *k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[type];
const struct bkey_ops *ops = &bch2_bkey_ops[k->type];
bch2_bkey_swab_key(f, k);
if (ops->swab)
ops->swab(f, k);
}
bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
{
const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
return ops->key_normalize
? ops->key_normalize(c, k)
: false;
}
enum merge_result bch2_bkey_merge(struct bch_fs *c,
struct bkey_i *l, struct bkey_i *r)
{
const struct bkey_ops *ops = &bch2_bkey_ops[l->k.type];
if (!key_merging_disabled(c) &&
ops->key_merge &&
l->k.type == r->k.type &&
!bversion_cmp(l->k.version, r->k.version) &&
!bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
return ops->key_merge(c, l, r);
return BCH_MERGE_NOMERGE;
}
static const struct old_bkey_type {
u8 btree_node_type;
u8 old;
u8 new;
} bkey_renumber_table[] = {
{BKEY_TYPE_BTREE, 128, KEY_TYPE_btree_ptr },
{BKEY_TYPE_EXTENTS, 128, KEY_TYPE_extent },
{BKEY_TYPE_EXTENTS, 129, KEY_TYPE_extent },
{BKEY_TYPE_EXTENTS, 130, KEY_TYPE_reservation },
{BKEY_TYPE_INODES, 128, KEY_TYPE_inode },
{BKEY_TYPE_INODES, 130, KEY_TYPE_inode_generation },
{BKEY_TYPE_DIRENTS, 128, KEY_TYPE_dirent },
{BKEY_TYPE_DIRENTS, 129, KEY_TYPE_whiteout },
{BKEY_TYPE_XATTRS, 128, KEY_TYPE_xattr },
{BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
{BKEY_TYPE_ALLOC, 128, KEY_TYPE_alloc },
{BKEY_TYPE_QUOTAS, 128, KEY_TYPE_quota },
};
void bch2_bkey_renumber(enum btree_node_type btree_node_type,
struct bkey_packed *k,
int write)
{
const struct old_bkey_type *i;
for (i = bkey_renumber_table;
i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
i++)
if (btree_node_type == i->btree_node_type &&
k->type == (write ? i->new : i->old)) {
k->type = write ? i->old : i->new;
break;
}
}
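A standalone sketch of the direction logic above (the 129 -> whiteout
mapping and BKEY_TYPE_XATTRS value are taken from the renumber table;
read maps old per-btree numbers to the new global ones, write maps back
for pre-renumber filesystems):

#include <stdio.h>

enum { BKEY_TYPE_XATTRS = 3, KEY_TYPE_whiteout = 4 };

static const struct { int btree, old_nr, new_nr; } table[] = {
        { BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
};

static void renumber(int btree, int *type, int write)
{
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (btree == table[i].btree &&
                    *type == (write ? table[i].new_nr : table[i].old_nr)) {
                        *type = write ? table[i].old_nr : table[i].new_nr;
                        break;
                }
}

int main(void)
{
        int type = 129;                         /* old on-disk xattr whiteout */

        renumber(BKEY_TYPE_XATTRS, &type, 0);
        printf("read:  %d\n", type);            /* 129 -> 4 (KEY_TYPE_whiteout) */

        renumber(BKEY_TYPE_XATTRS, &type, 1);
        printf("write: %d\n", type);            /* 4 -> 129, old format preserved */
        return 0;
}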
......@@ -4,24 +4,12 @@
#include "bkey.h"
#define DEF_BTREE_ID(kwd, val, name) BKEY_TYPE_##kwd = val,
enum bkey_type {
DEFINE_BCH_BTREE_IDS()
BKEY_TYPE_BTREE,
};
#undef DEF_BTREE_ID
/* Type of a key in btree @id at level @level: */
static inline enum bkey_type bkey_type(unsigned level, enum btree_id id)
{
return level ? BKEY_TYPE_BTREE : (enum bkey_type) id;
}
struct bch_fs;
struct btree;
struct bkey;
enum btree_node_type;
extern const char * const bch_bkey_types[];
enum merge_result {
BCH_MERGE_NOMERGE,
......@@ -34,12 +22,6 @@ enum merge_result {
BCH_MERGE_MERGE,
};
typedef bool (*key_filter_fn)(struct bch_fs *, struct btree *,
struct bkey_s);
typedef enum merge_result (*key_merge_fn)(struct bch_fs *,
struct btree *,
struct bkey_i *, struct bkey_i *);
struct bkey_ops {
/* Returns reason for being invalid if invalid, else NULL: */
const char * (*key_invalid)(const struct bch_fs *,
......@@ -49,41 +31,34 @@ struct bkey_ops {
void (*val_to_text)(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void (*swab)(const struct bkey_format *, struct bkey_packed *);
key_filter_fn key_normalize;
key_merge_fn key_merge;
bool is_extents;
bool (*key_normalize)(struct bch_fs *, struct bkey_s);
enum merge_result (*key_merge)(struct bch_fs *,
struct bkey_i *, struct bkey_i *);
};
static inline bool bkey_type_needs_gc(enum bkey_type type)
{
switch (type) {
case BKEY_TYPE_BTREE:
case BKEY_TYPE_EXTENTS:
case BKEY_TYPE_EC:
return true;
default:
return false;
}
}
const char *bch2_bkey_val_invalid(struct bch_fs *, enum bkey_type,
struct bkey_s_c);
const char *__bch2_bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c);
const char *bch2_bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c);
const char *bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c);
const char *__bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c,
enum btree_node_type);
const char *bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c,
enum btree_node_type);
const char *bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c);
void bch2_bkey_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c);
void bch2_bpos_to_text(struct printbuf *, struct bpos);
void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
void bch2_val_to_text(struct printbuf *, struct bch_fs *, enum bkey_type,
void bch2_val_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void bch2_bkey_val_to_text(struct printbuf *, struct bch_fs *,
enum bkey_type, struct bkey_s_c);
struct bkey_s_c);
void bch2_bkey_swab(const struct bkey_format *, struct bkey_packed *);
bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_swab(enum bkey_type, const struct bkey_format *,
struct bkey_packed *);
enum merge_result bch2_bkey_merge(struct bch_fs *,
struct bkey_i *, struct bkey_i *);
extern const struct bkey_ops bch2_bkey_ops[];
void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
#endif /* _BCACHEFS_BKEY_METHODS_H */
......@@ -257,7 +257,7 @@ static void extent_sort_append(struct bch_fs *c,
bch2_bkey_unpack(b, &tmp.k, k);
if (*prev &&
bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
bch2_bkey_merge(c, (void *) *prev, &tmp.k))
return;
if (*prev) {
......@@ -375,7 +375,7 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
}
/* Sort + repack in a new format: */
static struct btree_nr_keys
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
struct btree_node_iter *src_iter,
struct bkey_format *out_f,
......@@ -411,18 +411,12 @@ bch2_sort_repack_merge(struct bch_fs *c,
struct bset *dst, struct btree *src,
struct btree_node_iter *iter,
struct bkey_format *out_f,
bool filter_whiteouts,
key_filter_fn filter,
key_merge_fn merge)
bool filter_whiteouts)
{
struct bkey_packed *k, *prev = NULL, *out;
struct btree_nr_keys nr;
BKEY_PADDED(k) tmp;
if (!filter && !merge)
return bch2_sort_repack(dst, src, iter, out_f,
filter_whiteouts);
memset(&nr, 0, sizeof(nr));
while ((k = bch2_btree_node_iter_next_all(iter, src))) {
......@@ -435,14 +429,15 @@ bch2_sort_repack_merge(struct bch_fs *c,
*/
bch2_bkey_unpack(src, &tmp.k, k);
if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
if (filter_whiteouts &&
bch2_bkey_normalize(c, bkey_i_to_s(&tmp.k)))
continue;
/* prev is always unpacked, for key merging: */
if (prev &&
merge &&
merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
bch2_bkey_merge(c, (void *) prev, &tmp.k) ==
BCH_MERGE_MERGE)
continue;
/*
......@@ -606,7 +601,7 @@ unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
continue;
EBUG_ON(bkeyp_val_u64s(f, in));
EBUG_ON(in->type != KEY_TYPE_DISCARD);
EBUG_ON(in->type != KEY_TYPE_discard);
r.k = bkey_unpack_key(iter->b, in);
......
......@@ -47,13 +47,14 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *, struct bset *,
struct btree_node_iter_large *);
struct btree_nr_keys
bch2_sort_repack(struct bset *, struct btree *,
struct btree_node_iter *,
struct bkey_format *, bool);
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *,
struct bset *, struct btree *,
struct btree_node_iter *,
struct bkey_format *,
bool,
key_filter_fn,
key_merge_fn);
struct bkey_format *, bool);
unsigned bch2_sort_keys(struct bkey_packed *,
struct sort_iter *, bool);
......
......@@ -397,7 +397,7 @@ bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
static inline struct bkey_packed *
bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
return bch2_bkey_prev_filter(b, t, k, KEY_TYPE_DISCARD + 1);
return bch2_bkey_prev_filter(b, t, k, KEY_TYPE_discard + 1);
}
enum bch_extent_overlap {
......@@ -529,7 +529,7 @@ bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
static inline struct bkey_packed *
bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
return bch2_btree_node_iter_peek_filter(iter, b, KEY_TYPE_DISCARD + 1);
return bch2_btree_node_iter_peek_filter(iter, b, KEY_TYPE_discard + 1);
}
static inline struct bkey_packed *
......@@ -555,7 +555,7 @@ bch2_btree_node_iter_prev_all(struct btree_node_iter *iter, struct btree *b)
static inline struct bkey_packed *
bch2_btree_node_iter_prev(struct btree_node_iter *iter, struct btree *b)
{
return bch2_btree_node_iter_prev_filter(iter, b, KEY_TYPE_DISCARD + 1);
return bch2_btree_node_iter_prev_filter(iter, b, KEY_TYPE_discard + 1);
}
struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
......
......@@ -6,20 +6,17 @@
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "extents.h"
#include "trace.h"
#include <linux/prefetch.h>
#define DEF_BTREE_ID(kwd, val, name) name,
const char * const bch2_btree_ids[] = {
DEFINE_BCH_BTREE_IDS()
#define x(kwd, val, name) name,
BCH_BTREE_IDS()
#undef x
NULL
};
#undef DEF_BTREE_ID
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
......@@ -100,7 +97,7 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
if (!b)
return NULL;
bkey_extent_init(&b->key);
bkey_btree_ptr_init(&b->key);
six_lock_init(&b->lock);
lockdep_set_novalidate_class(&b->lock);
INIT_LIST_HEAD(&b->list);
......@@ -117,7 +114,7 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
/* Cause future lookups for this node to fail: */
bkey_i_to_extent(&b->key)->v._data[0] = 0;
PTR_HASH(&b->key) = 0;
}
int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
......@@ -604,7 +601,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
/* raced with another fill: */
/* mark as unhashed... */
bkey_i_to_extent(&b->key)->v._data[0] = 0;
PTR_HASH(&b->key) = 0;
mutex_lock(&bc->lock);
list_add(&b->list, &bc->freeable);
......@@ -906,8 +903,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
b->data->min_key.offset,
b->data->max_key.inode,
b->data->max_key.offset);
bch2_val_to_text(out, c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key));
bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
pr_buf(out, "\n"
" format: u64s %u fields %u %u %u %u %u\n"
" unpack fn len: %u\n"
......
......@@ -4,7 +4,6 @@
#include "bcachefs.h"
#include "btree_types.h"
#include "extents.h"
struct btree_iter;
......@@ -37,12 +36,13 @@ void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);
#define PTR_HASH(_k) (bkey_i_to_extent_c(_k)->v._data[0])
#define PTR_HASH(_k) *((u64 *) &bkey_i_to_btree_ptr_c(_k)->v)
/* is btree node in hash table? */
static inline bool btree_node_hashed(struct btree *b)
{
return bkey_extent_is_data(&b->key.k) && PTR_HASH(&b->key);
return b->key.k.type == KEY_TYPE_btree_ptr &&
PTR_HASH(&b->key);
}
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos) \
......
......@@ -112,137 +112,11 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
/* marking of btree keys/nodes: */
static void ptr_gen_recalc_oldest(struct bch_fs *c,
const struct bch_extent_ptr *ptr,
u8 *max_stale)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
if (gen_after(ca->oldest_gens[b], ptr->gen))
ca->oldest_gens[b] = ptr->gen;
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
static void ptr_gens_recalc_oldest(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k, u8 *max_stale)
{
const struct bch_extent_ptr *ptr;
switch (type) {
case BKEY_TYPE_BTREE:
case BKEY_TYPE_EXTENTS:
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
extent_for_each_ptr(e, ptr)
ptr_gen_recalc_oldest(c, ptr, max_stale);
break;
}
}
break;
case BKEY_TYPE_EC:
switch (k.k->type) {
case BCH_STRIPE: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
for (ptr = s.v->ptrs;
ptr < s.v->ptrs + s.v->nr_blocks;
ptr++)
ptr_gen_recalc_oldest(c, ptr, max_stale);
}
}
default:
break;
}
}
static int ptr_gen_check(struct bch_fs *c,
enum bkey_type type,
const struct bch_extent_ptr *ptr)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
struct bucket *g = PTR_BUCKET(ca, ptr);
int ret = 0;
if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
"found ptr with missing gen in alloc btree,\n"
"type %u gen %u",
type, ptr->gen)) {
g->_mark.gen = ptr->gen;
g->_mark.gen_valid = 1;
set_bit(b, ca->buckets_dirty);
}
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
"%u ptr gen in the future: %u > %u",
type, ptr->gen, g->mark.gen)) {
g->_mark.gen = ptr->gen;
g->_mark.gen_valid = 1;
set_bit(b, ca->buckets_dirty);
set_bit(BCH_FS_FIXED_GENS, &c->flags);
}
fsck_err:
return ret;
}
static int ptr_gens_check(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
{
const struct bch_extent_ptr *ptr;
int ret = 0;
switch (type) {
case BKEY_TYPE_BTREE:
case BKEY_TYPE_EXTENTS:
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
extent_for_each_ptr(e, ptr) {
ret = ptr_gen_check(c, type, ptr);
if (ret)
return ret;
}
break;
}
}
break;
case BKEY_TYPE_EC:
switch (k.k->type) {
case BCH_STRIPE: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
for (ptr = s.v->ptrs;
ptr < s.v->ptrs + s.v->nr_blocks;
ptr++) {
ret = ptr_gen_check(c, type, ptr);
if (ret)
return ret;
}
}
}
break;
default:
break;
}
return ret;
}
/*
* For runtime mark and sweep:
*/
static int bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k,
static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
u8 *max_stale, bool initial)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
struct gc_pos pos = { 0 };
unsigned flags =
BCH_BUCKET_MARK_GC|
......@@ -257,23 +131,50 @@ static int bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
atomic64_set(&c->key_version, k.k->version.lo);
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err_on(!bch2_bkey_replicas_marked(c, type, k,
false), c,
fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
"superblock not marked as containing replicas (type %u)",
type)) {
ret = bch2_mark_bkey_replicas(c, type, k);
k.k->type)) {
ret = bch2_mark_bkey_replicas(c, k);
if (ret)
return ret;
}
ret = ptr_gens_check(c, type, k);
if (ret)
return ret;
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
struct bucket *g = PTR_BUCKET(ca, ptr);
if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
"found ptr with missing gen in alloc btree,\n"
"type %u gen %u",
k.k->type, ptr->gen)) {
g->_mark.gen = ptr->gen;
g->_mark.gen_valid = 1;
set_bit(b, ca->buckets_dirty);
}
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
"%u ptr gen in the future: %u > %u",
k.k->type, ptr->gen, g->mark.gen)) {
g->_mark.gen = ptr->gen;
g->_mark.gen_valid = 1;
set_bit(b, ca->buckets_dirty);
set_bit(BCH_FS_FIXED_GENS, &c->flags);
}
}
}
bch2_mark_key(c, type, k, true, k.k->size, pos, NULL, 0, flags);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
if (gen_after(ca->oldest_gens[b], ptr->gen))
ca->oldest_gens[b] = ptr->gen;
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
ptr_gens_recalc_oldest(c, type, k, max_stale);
bch2_mark_key(c, k, true, k.k->size, pos, NULL, 0, flags);
fsck_err:
return ret;
}
......@@ -281,7 +182,6 @@ static int bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
u8 *max_stale, bool initial)
{
enum bkey_type type = btree_node_type(b);
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
......@@ -289,14 +189,14 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
*max_stale = 0;
if (!bkey_type_needs_gc(type))
if (!btree_node_type_needs_gc(btree_node_type(b)))
return 0;
for_each_btree_node_key_unpack(b, k, &iter,
&unpacked) {
bch2_bkey_debugcheck(c, b, k);
ret = bch2_gc_mark_key(c, type, k, max_stale, initial);
ret = bch2_gc_mark_key(c, k, max_stale, initial);
if (ret)
break;
}
......@@ -310,7 +210,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
struct btree_iter iter;
struct btree *b;
struct range_checks r;
unsigned depth = bkey_type_needs_gc(btree_id) ? 0 : 1;
unsigned depth = btree_node_type_needs_gc(btree_id) ? 0 : 1;
u8 max_stale;
int ret = 0;
......@@ -364,7 +264,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
b = c->btree_roots[btree_id].b;
if (!btree_node_fake(b))
bch2_gc_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key),
bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
&max_stale, initial);
gc_pos_set(c, gc_pos_btree_root(b->btree_id));
......@@ -391,13 +291,13 @@ static int bch2_gc_btrees(struct bch_fs *c, struct list_head *journal,
for (i = 0; i < BTREE_ID_NR; i++) {
enum btree_id id = ids[i];
enum bkey_type type = bkey_type(0, id);
enum btree_node_type type = __btree_node_type(0, id);
int ret = bch2_gc_btree(c, id, initial);
if (ret)
return ret;
if (journal && bkey_type_needs_gc(type)) {
if (journal && btree_node_type_needs_gc(type)) {
struct bkey_i *k, *n;
struct jset_entry *j;
struct journal_replay *r;
......@@ -405,8 +305,8 @@ static int bch2_gc_btrees(struct bch_fs *c, struct list_head *journal,
list_for_each_entry(r, journal, list)
for_each_jset_key(k, n, j, &r->j) {
if (type == bkey_type(j->level, j->btree_id)) {
ret = bch2_gc_mark_key(c, type,
if (type == __btree_node_type(j->level, j->btree_id)) {
ret = bch2_gc_mark_key(c,
bkey_i_to_s_c(k),
&max_stale, initial);
if (ret)
......@@ -507,8 +407,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
bch2_mark_key(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&d->key),
bch2_mark_key(c, bkey_i_to_s_c(&d->key),
true, 0,
pos, NULL, 0,
BCH_BUCKET_MARK_GC);
......
......@@ -4,8 +4,6 @@
#include "btree_types.h"
enum bkey_type;
void bch2_coalesce(struct bch_fs *);
int bch2_gc(struct bch_fs *, struct list_head *, bool);
void bch2_gc_thread_stop(struct bch_fs *);
......@@ -58,9 +56,9 @@ static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
switch (id) {
#define DEF_BTREE_ID(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
DEFINE_BCH_BTREE_IDS()
#undef DEF_BTREE_ID
#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
BCH_BTREE_IDS()
#undef x
default:
BUG();
}
......
......@@ -392,12 +392,16 @@ void bch2_btree_sort_into(struct bch_fs *c,
bch2_btree_node_iter_init_from_start(&src_iter, src);
nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
src, &src_iter,
&dst->format,
true,
btree_node_ops(src)->key_normalize,
btree_node_ops(src)->key_merge);
if (btree_node_is_extents(src))
nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
src, &src_iter,
&dst->format,
true);
else
nr = bch2_sort_repack(btree_bset_first(dst),
src, &src_iter,
&dst->format,
true);
bch2_time_stats_update(&c->times[BCH_TIME_btree_sort], start_time);
......@@ -598,8 +602,8 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
{
struct bkey_packed *k, *prev = NULL;
struct bpos prev_pos = POS_MIN;
enum bkey_type type = btree_node_type(b);
bool seen_non_whiteout = false;
unsigned version;
const char *err;
int ret = 0;
......@@ -645,13 +649,12 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
"invalid bkey format: %s", err);
}
if (btree_err_on(le16_to_cpu(i->version) != BCACHE_BSET_VERSION,
BTREE_ERR_FIXABLE, c, b, i,
"unsupported bset version")) {
i->version = cpu_to_le16(BCACHE_BSET_VERSION);
i->u64s = 0;
return 0;
}
version = le16_to_cpu(i->version);
btree_err_on((version != BCH_BSET_VERSION_OLD &&
version < bcachefs_metadata_version_min) ||
version >= bcachefs_metadata_version_max,
BTREE_ERR_FATAL, c, b, i,
"unsupported bset version");
if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
BTREE_ERR_FIXABLE, c, b, i,
......@@ -700,17 +703,21 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
}
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
bch2_bkey_swab(type, &b->format, k);
bch2_bkey_swab(&b->format, k);
if (!write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(btree_node_type(b), k, write);
u = bkey_disassemble(b, k, &tmp);
invalid = __bch2_bkey_invalid(c, type, u) ?:
invalid = __bch2_bkey_invalid(c, u, btree_node_type(b)) ?:
bch2_bkey_in_btree_node(b, u) ?:
(write ? bch2_bkey_val_invalid(c, type, u) : NULL);
(write ? bch2_bkey_val_invalid(c, u) : NULL);
if (invalid) {
char buf[160];
bch2_bkey_val_to_text(&PBUF(buf), c, type, u);
bch2_bkey_val_to_text(&PBUF(buf), c, u);
btree_err(BTREE_ERR_FIXABLE, c, b, i,
"invalid bkey:\n%s\n%s", invalid, buf);
......@@ -720,6 +727,10 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
continue;
}
if (write &&
version < bcachefs_metadata_version_bkey_renumber)
bch2_bkey_renumber(btree_node_type(b), k, write);
/*
* with the separate whiteouts thing (used for extents), the
* second set of keys actually can have whiteouts too, so we
......@@ -885,17 +896,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
enum bkey_type type = btree_node_type(b);
struct bkey tmp;
struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
const char *invalid = bch2_bkey_val_invalid(c, type, u);
const char *invalid = bch2_bkey_val_invalid(c, u);
if (invalid ||
(inject_invalid_keys(c) &&
!bversion_cmp(u.k->version, MAX_VERSION))) {
char buf[160];
bch2_bkey_val_to_text(&PBUF(buf), c, type, u);
bch2_bkey_val_to_text(&PBUF(buf), c, u);
btree_err(BTREE_ERR_FIXABLE, c, b, i,
"invalid bkey %s: %s", buf, invalid);
......@@ -964,7 +974,9 @@ static void btree_node_read_work(struct work_struct *work)
bch2_mark_io_failure(&failed, &rb->pick);
can_retry = bch2_btree_pick_ptr(c, b, &failed, &rb->pick) > 0;
can_retry = bch2_bkey_pick_read_device(c,
bkey_i_to_s_c(&b->key),
&failed, &rb->pick) > 0;
if (!bio->bi_status &&
!bch2_btree_node_read_done(c, b, can_retry))
......@@ -1007,7 +1019,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
trace_btree_read(c, b);
ret = bch2_btree_pick_ptr(c, b, NULL, &pick);
ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
NULL, &pick);
if (bch2_fs_fatal_err_on(ret <= 0, c,
"btree node read error: no device to read from")) {
set_btree_node_read_error(b);
......@@ -1135,8 +1148,8 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
{
struct btree *b = wbio->wbio.bio.bi_private;
__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct bkey_i_extent *new_key;
struct bkey_s_extent e;
struct bkey_i_btree_ptr *new_key;
struct bkey_s_btree_ptr bp;
struct bch_extent_ptr *ptr;
struct btree_iter iter;
int ret;
......@@ -1160,13 +1173,13 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
bkey_copy(&tmp.k, &b->key);
new_key = bkey_i_to_extent(&tmp.k);
e = extent_i_to_s(new_key);
new_key = bkey_i_to_btree_ptr(&tmp.k);
bp = btree_ptr_i_to_s(new_key);
bch2_extent_drop_ptrs(e, ptr,
bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
if (!bch2_extent_nr_ptrs(e.c))
if (!bch2_bkey_nr_ptrs(bp.s_c))
goto err;
ret = bch2_btree_node_update_key(c, &iter, b, new_key);
......@@ -1269,12 +1282,11 @@ static void btree_node_write_endio(struct bio *bio)
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors)
{
const struct bch_extent_ptr *ptr;
unsigned whiteout_u64s = 0;
int ret;
extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
break;
if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
return -1;
ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
if (ret)
......@@ -1292,7 +1304,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
BKEY_PADDED(key) k;
struct bkey_s_extent e;
struct bch_extent_ptr *ptr;
struct sort_iter sort_iter;
struct nonce nonce;
......@@ -1300,6 +1311,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
u64 seq = 0;
bool used_mempool;
unsigned long old, new;
bool validate_before_checksum = false;
void *data;
if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
......@@ -1433,11 +1445,21 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
BUG_ON(i->seq != b->data->keys.seq);
i->version = cpu_to_le16(BCACHE_BSET_VERSION);
i->version = c->sb.version < bcachefs_metadata_version_new_versioning
? cpu_to_le16(BCH_BSET_VERSION_OLD)
: cpu_to_le16(c->sb.version);
SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
validate_before_checksum = true;
/* validate_bset will be modifying: */
if (le16_to_cpu(i->version) <
bcachefs_metadata_version_bkey_renumber)
validate_before_checksum = true;
/* if we're going to be encrypting, check metadata validity first: */
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
if (validate_before_checksum &&
validate_bset_for_write(c, b, i, sectors_to_write))
goto err;
......@@ -1451,7 +1473,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
/* if we're not encrypting, check metadata after checksumming: */
if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
if (!validate_before_checksum &&
validate_bset_for_write(c, b, i, sectors_to_write))
goto err;
......@@ -1506,9 +1528,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
*/
bkey_copy(&k.key, &b->key);
e = bkey_i_to_s_extent(&k.key);
extent_for_each_ptr(e, ptr)
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
ptr->offset += b->written;
b->written += sectors_to_write;
......
......@@ -433,7 +433,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
* whiteouts)
*/
k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_DISCARD)
? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
: bch2_btree_node_iter_prev_all(&tmp, b);
if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
char buf[100];
......@@ -622,7 +622,7 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
* signal to bch2_btree_iter_peek_slot() that we're currently at
* a hole
*/
u->type = KEY_TYPE_DELETED;
u->type = KEY_TYPE_deleted;
return bkey_s_c_null;
}
......
......@@ -405,20 +405,45 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
return i - (void *) b->data;
}
enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
BCH_BTREE_IDS()
#undef x
BKEY_TYPE_BTREE,
};
/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
}
/* Type of keys @b contains: */
static inline enum bkey_type btree_node_type(struct btree *b)
static inline enum btree_node_type btree_node_type(struct btree *b)
{
return b->level ? BKEY_TYPE_BTREE : b->btree_id;
return __btree_node_type(b->level, b->btree_id);
}
static inline const struct bkey_ops *btree_node_ops(struct btree *b)
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
return &bch2_bkey_ops[btree_node_type(b)];
return type == BKEY_TYPE_EXTENTS;
}
static inline bool btree_node_is_extents(struct btree *b)
{
return btree_node_type(b) == BKEY_TYPE_EXTENTS;
return btree_node_type_is_extents(btree_node_type(b));
}
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
switch (type) {
case BKEY_TYPE_BTREE:
case BKEY_TYPE_EXTENTS:
case BKEY_TYPE_EC:
return true;
default:
return false;
}
}
struct btree_root {
......
......@@ -120,7 +120,7 @@ int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
__le64, unsigned);
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
struct btree *, struct bkey_i_extent *);
struct btree *, struct bkey_i_btree_ptr *);
/* new transactional interface: */
......
......@@ -132,13 +132,15 @@ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
/* Btree node freeing/allocation: */
static bool btree_key_matches(struct bch_fs *c,
struct bkey_s_c_extent l,
struct bkey_s_c_extent r)
struct bkey_s_c l,
struct bkey_s_c r)
{
struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(l);
struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(r);
const struct bch_extent_ptr *ptr1, *ptr2;
extent_for_each_ptr(l, ptr1)
extent_for_each_ptr(r, ptr2)
bkey_for_each_ptr(ptrs1, ptr1)
bkey_for_each_ptr(ptrs2, ptr2)
if (ptr1->dev == ptr2->dev &&
ptr1->gen == ptr2->gen &&
ptr1->offset == ptr2->offset)
......@@ -164,8 +166,7 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
for (d = as->pending; d < as->pending + as->nr_pending; d++)
if (!bkey_cmp(k.k->p, d->key.k.p) &&
btree_key_matches(c, bkey_s_c_to_extent(k),
bkey_i_to_s_c_extent(&d->key)))
btree_key_matches(c, k, bkey_i_to_s_c(&d->key)))
goto found;
BUG();
found:
......@@ -197,7 +198,7 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
? gc_pos_btree_node(b)
: gc_pos_btree_root(as->btree_id)) >= 0 &&
gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bch2_mark_key_locked(c,
bkey_i_to_s_c(&d->key),
false, 0, pos,
NULL, 0, BCH_BUCKET_MARK_GC);
......@@ -270,8 +271,7 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
{
BUG_ON(!pending->index_update_done);
bch2_mark_key(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&pending->key),
bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
false, 0,
gc_phase(GC_PHASE_PENDING_DELETE),
NULL, 0, 0);
......@@ -285,7 +285,6 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
struct write_point *wp;
struct btree *b;
BKEY_PADDED(k) tmp;
struct bkey_i_extent *e;
struct open_buckets ob = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve;
......@@ -336,8 +335,8 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
goto retry;
}
e = bkey_extent_init(&tmp.k);
bch2_alloc_sectors_append_ptrs(c, wp, e, c->opts.btree_node_size);
bkey_btree_ptr_init(&tmp.k);
bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size);
bch2_open_bucket_get(c, wp, &ob);
bch2_alloc_sectors_done(c, wp);
......@@ -375,7 +374,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
b->data->flags = 0;
SET_BTREE_NODE_ID(b->data, as->btree_id);
SET_BTREE_NODE_LEVEL(b->data, level);
b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
b->data->ptr = bkey_i_to_btree_ptr(&b->key)->v.start[0];
bch2_btree_build_aux_trees(b);
......@@ -528,8 +527,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
goto err_free;
}
ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key));
ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&b->key));
if (ret)
goto err_free;
......@@ -1072,8 +1070,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
mutex_lock(&c->btree_interior_update_lock);
percpu_down_read(&c->usage_lock);
bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key),
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
......@@ -1166,11 +1163,9 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
mutex_lock(&c->btree_interior_update_lock);
percpu_down_read(&c->usage_lock);
if (bkey_extent_is_data(&insert->k))
bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(insert),
true, 0,
gc_pos_btree_node(b), &stats, 0, 0);
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
true, 0,
gc_pos_btree_node(b), &stats, 0, 0);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
......@@ -1893,7 +1888,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
struct btree_update *as,
struct btree_iter *iter,
struct btree *b, struct btree *new_hash,
struct bkey_i_extent *new_key)
struct bkey_i_btree_ptr *new_key)
{
struct btree *parent;
int ret;
......@@ -1938,7 +1933,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
*/
ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
c->opts.btree_node_size *
bch2_extent_nr_ptrs(extent_i_to_s_c(new_key)),
bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
BCH_DISK_RESERVATION_NOFAIL|
BCH_DISK_RESERVATION_GC_LOCK_HELD);
BUG_ON(ret);
......@@ -1978,8 +1973,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
mutex_lock(&c->btree_interior_update_lock);
percpu_down_read(&c->usage_lock);
bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&new_key->k_i),
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
......@@ -2012,7 +2006,8 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
}
int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
struct btree *b, struct bkey_i_extent *new_key)
struct btree *b,
struct bkey_i_btree_ptr *new_key)
{
struct btree *parent = btree_node_parent(iter, b);
struct btree_update *as = NULL;
......@@ -2078,8 +2073,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
goto err;
}
ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
extent_i_to_s_c(new_key).s_c);
ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&new_key->k_i));
if (ret)
goto err_free_update;
......@@ -2137,9 +2131,9 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
b->level = 0;
b->btree_id = id;
bkey_extent_init(&b->key);
bkey_btree_ptr_init(&b->key);
b->key.k.p = POS_MAX;
bkey_i_to_extent(&b->key)->v._data[0] = U64_MAX - id;
PTR_HASH(&b->key) = U64_MAX - id;
bch2_bset_init_first(b, &b->data->keys);
bch2_btree_build_aux_trees(b);
......
......@@ -71,7 +71,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
goto overwrite;
}
k->type = KEY_TYPE_DELETED;
k->type = KEY_TYPE_deleted;
bch2_btree_node_iter_fix(iter, b, node_iter, k,
k->u64s, k->u64s);
bch2_btree_iter_verify(iter, b);
......@@ -312,7 +312,6 @@ btree_key_can_insert(struct btree_insert *trans,
return BTREE_INSERT_BTREE_NODE_FULL;
if (!bch2_bkey_replicas_marked(c,
insert->iter->btree_id,
bkey_i_to_s_c(insert->k),
true))
return BTREE_INSERT_NEED_MARK_REPLICAS;
......@@ -449,8 +448,8 @@ static inline void btree_insert_entry_checks(struct bch_fs *c,
BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
BUG_ON(debug_check_bkeys(c) &&
!bkey_deleted(&i->k->k) &&
bch2_bkey_invalid(c, (enum bkey_type) i->iter->btree_id,
bkey_i_to_s_c(i->k)));
bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
i->iter->btree_id));
}
/**
......@@ -585,8 +584,7 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
}
bch2_btree_iter_unlock(trans->entries[0].iter);
ret = bch2_mark_bkey_replicas(c, i->iter->btree_id,
bkey_i_to_s_c(i->k))
ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k))
?: -EINTR;
break;
default:
......
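Every hunk in this file makes the same mechanical change: with globally unique key types, the btree id no longer needs to ride along with the key. As prototypes, before and after (parameter names assumed; the shapes are taken from the calls above):

/* before: the caller had to name the namespace the type lived in */
int bch2_mark_bkey_replicas(struct bch_fs *c, enum bkey_type type,
                            struct bkey_s_c k);

/* after: k.k->type alone identifies the value */
int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k);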
......@@ -220,10 +220,10 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
#define BCH_BUCKET_MARK_NOATOMIC (1 << 0)
#define BCH_BUCKET_MARK_GC (1 << 1)
int bch2_mark_key_locked(struct bch_fs *, enum bkey_type, struct bkey_s_c,
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,
struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,
struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
......
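With the enum bkey_type parameter gone from the prototypes above, the marking code can dispatch on k.k->type alone. A sketch of the shape that dispatch plausibly takes in buckets.c; mark_extent() and mark_stripe() are stand-in names, and the real bch2_mark_key() carries the full argument list shown in the header:

static int mark_key_sketch(struct bch_fs *c, struct bkey_s_c k)
{
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_extent:
                /* one handler: the type, not the btree, says it has ptrs */
                return mark_extent(c, k);
        case KEY_TYPE_stripe:
                return mark_stripe(c, k);
        default:
                return 0;
        }
}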
......@@ -56,7 +56,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
v->btree_id = b->btree_id;
bch2_btree_keys_init(v, &c->expensive_debug_checks);
if (bch2_btree_pick_ptr(c, b, NULL, &pick) <= 0)
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
NULL, &pick) <= 0)
return;
ca = bch_dev_bkey_exists(c, pick.ptr.dev);
......@@ -223,8 +224,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
k = bch2_btree_iter_peek(&iter);
while (k.k && !(err = btree_iter_err(k))) {
bch2_bkey_val_to_text(&PBUF(i->buf), i->c,
bkey_type(0, i->id), k);
bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
i->bytes = strlen(i->buf);
BUG_ON(i->bytes >= PAGE_SIZE);
i->buf[i->bytes] = '\n';
......
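Two debug helpers change shape here: bch2_btree_pick_ptr() is replaced by bch2_bkey_pick_read_device(), which accepts any pointer-bearing key, and bch2_bkey_val_to_text() drops its bkey_type argument. The new call shapes, lifted from the hunks above (the declaration of pick is assumed to match the extent pick struct used elsewhere):

/* pick a readable replica from any key that carries pointers */
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
                               NULL, &pick) <= 0)
        return;

/* print a key without naming the btree it came from */
bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);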
......@@ -65,8 +65,7 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
const struct bch_hash_desc bch2_dirent_hash_desc = {
.btree_id = BTREE_ID_DIRENTS,
.key_type = BCH_DIRENT,
.whiteout_type = BCH_DIRENT_WHITEOUT,
.key_type = KEY_TYPE_dirent,
.hash_key = dirent_hash_key,
.hash_bkey = dirent_hash_bkey,
.cmp_key = dirent_cmp_key,
......@@ -75,58 +74,37 @@ const struct bch_hash_desc bch2_dirent_hash_desc = {
const char *bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_dirent d;
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
unsigned len;
switch (k.k->type) {
case BCH_DIRENT:
if (bkey_val_bytes(k.k) < sizeof(struct bch_dirent))
return "value too small";
d = bkey_s_c_to_dirent(k);
len = bch2_dirent_name_bytes(d);
if (!len)
return "empty name";
if (bkey_val_bytes(k.k) < sizeof(struct bch_dirent))
return "value too small";
/*
* older versions of bcachefs were buggy and created dirent
* keys bigger than necessary:
*/
if (bkey_val_u64s(k.k) > dirent_val_u64s(len + 7))
return "value too big";
len = bch2_dirent_name_bytes(d);
if (!len)
return "empty name";
if (len > BCH_NAME_MAX)
return "dirent name too big";
/*
* older versions of bcachefs were buggy and created dirent
* keys bigger than necessary:
*/
if (bkey_val_u64s(k.k) > dirent_val_u64s(len + 7))
return "value too big";
return NULL;
case BCH_DIRENT_WHITEOUT:
return bkey_val_bytes(k.k) != 0
? "value size should be zero"
: NULL;
if (len > BCH_NAME_MAX)
return "dirent name too big";
default:
return "invalid type";
}
return NULL;
}
void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_s_c_dirent d;
switch (k.k->type) {
case BCH_DIRENT:
d = bkey_s_c_to_dirent(k);
bch_scnmemcpy(out, d.v->d_name,
bch2_dirent_name_bytes(d));
pr_buf(out, " -> %llu", d.v->d_inum);
break;
case BCH_DIRENT_WHITEOUT:
pr_buf(out, "whiteout");
break;
}
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
bch_scnmemcpy(out, d.v->d_name,
bch2_dirent_name_bytes(d));
pr_buf(out, " -> %llu", d.v->d_inum);
}
static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
......@@ -287,7 +265,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
* overwrite old_dst - just make sure to use a
* whiteout when deleting src:
*/
new_src->k.type = BCH_DIRENT_WHITEOUT;
new_src->k.type = KEY_TYPE_whiteout;
}
} else {
/* Check if we need a whiteout to delete src: */
......@@ -298,7 +276,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
return ret;
if (ret)
new_src->k.type = BCH_DIRENT_WHITEOUT;
new_src->k.type = KEY_TYPE_whiteout;
}
}
......@@ -361,7 +339,7 @@ int bch2_empty_dir(struct bch_fs *c, u64 dir_inum)
if (k.k->p.inode > dir_inum)
break;
if (k.k->type == BCH_DIRENT) {
if (k.k->type == KEY_TYPE_dirent) {
ret = -ENOTEMPTY;
break;
}
......@@ -385,7 +363,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file,
for_each_btree_key(&iter, c, BTREE_ID_DIRENTS,
POS(inode->v.i_ino, ctx->pos), 0, k) {
if (k.k->type != BCH_DIRENT)
if (k.k->type != KEY_TYPE_dirent)
continue;
dirent = bkey_s_c_to_dirent(k);
......
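bch2_dirent_invalid() and bch2_dirent_to_text() can shed their switch statements because they are now reached only through the dirent slot of a per-type ops table; the type check happens once, centrally. A sketch of that central dispatch, assuming the table in bkey_methods.c is indexed by the new global enum:

/* sketch; the real dispatcher in bkey_methods.c may differ */
const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
{
        const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

        return ops->key_invalid ? ops->key_invalid(c, k) : NULL;
}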
......@@ -9,7 +9,7 @@ extern const struct bch_hash_desc bch2_dirent_hash_desc;
const char *bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_dirent_ops (struct bkey_ops) { \
#define bch2_bkey_ops_dirent (struct bkey_ops) { \
.key_invalid = bch2_dirent_invalid, \
.val_to_text = bch2_dirent_to_text, \
}
......
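The rename from bch2_bkey_dirent_ops to bch2_bkey_ops_dirent is not cosmetic: deriving the macro name from the type name lets the whole dispatch table be generated from the type list. Assumed shape, given an x-macro list of the new types:

const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
        BCH_BKEY_TYPES()
#undef x
};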
......@@ -123,49 +123,39 @@ static void *stripe_csum(struct bch_stripe *s, unsigned dev, unsigned csum_idx)
return csums + (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}
const char *bch2_ec_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
if (k.k->p.inode)
return "invalid stripe key";
switch (k.k->type) {
case BCH_STRIPE: {
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
if (bkey_val_bytes(k.k) < sizeof(*s))
return "incorrect value size";
if (bkey_val_bytes(k.k) < sizeof(*s))
return "incorrect value size";
if (bkey_val_u64s(k.k) != stripe_val_u64s(s))
return "incorrect value size";
if (bkey_val_u64s(k.k) != stripe_val_u64s(s))
return "incorrect value size";
return NULL;
}
default:
return "invalid type";
}
return NULL;
}
void bch2_ec_key_to_text(struct printbuf *out, struct bch_fs *c,
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
switch (k.k->type) {
case BCH_STRIPE: {
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned i;
pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
s->algorithm,
le16_to_cpu(s->sectors),
s->nr_blocks - s->nr_redundant,
s->nr_redundant,
s->csum_type,
1U << s->csum_granularity_bits);
for (i = 0; i < s->nr_blocks; i++)
pr_buf(out, " %u:%llu", s->ptrs[i].dev,
(u64) s->ptrs[i].offset);
}
}
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned i;
pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
s->algorithm,
le16_to_cpu(s->sectors),
s->nr_blocks - s->nr_redundant,
s->nr_redundant,
s->csum_type,
1U << s->csum_granularity_bits);
for (i = 0; i < s->nr_blocks; i++)
pr_buf(out, " %u:%llu", s->ptrs[i].dev,
(u64) s->ptrs[i].offset);
}
static int ptr_matches_stripe(struct bch_fs *c,
......@@ -454,7 +444,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
POS(0, stripe_idx),
BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(&iter);
if (btree_iter_err(k) || k.k->type != BCH_STRIPE) {
if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe) {
__bcache_io_error(c,
"error doing reconstruct read: stripe not found");
kfree(buf);
......@@ -695,7 +685,7 @@ static void ec_stripe_delete(struct bch_fs *c, size_t idx)
POS(0, idx),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
if (btree_iter_err(k) || k.k->type != BCH_STRIPE)
if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe)
goto out;
v = kmalloc(bkey_val_bytes(k.k), GFP_KERNEL);
......
......@@ -5,13 +5,13 @@
#include "ec_types.h"
#include "keylist_types.h"
const char *bch2_ec_key_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_ec_key_to_text(struct printbuf *, struct bch_fs *,
const char *bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
#define bch2_bkey_ec_ops (struct bkey_ops) { \
.key_invalid = bch2_ec_key_invalid, \
.val_to_text = bch2_ec_key_to_text, \
#define bch2_bkey_ops_stripe (struct bkey_ops) { \
.key_invalid = bch2_stripe_invalid, \
.val_to_text = bch2_stripe_to_text, \
}
struct bch_read_bio;
......
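The erasure-coding callbacks get the same treatment as dirents: every name now derives from the key type itself, so the pieces line up mechanically (all five names appear in the hunks above):

/*   KEY_TYPE_stripe         - the key type              */
/*   struct bch_stripe       - the value layout          */
/*   bch2_stripe_invalid()   - .key_invalid callback     */
/*   bch2_stripe_to_text()   - .val_to_text callback     */
/*   bch2_bkey_ops_stripe    - the bkey_ops table entry  */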
......@@ -121,7 +121,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
BUG_ON(res->sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC,
-((s64) res->sectors), BCH_QUOTA_PREALLOC);
-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
inode->ei_quota_reserved -= res->sectors;
mutex_unlock(&inode->ei_quota_lock);
......@@ -138,7 +138,7 @@ static int bch2_quota_reservation_add(struct bch_fs *c,
mutex_lock(&inode->ei_quota_lock);
ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
check_enospc ? BCH_QUOTA_PREALLOC : BCH_QUOTA_NOCHECK);
check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
if (likely(!ret)) {
inode->ei_quota_reserved += sectors;
res->sectors += sectors;
......@@ -220,7 +220,7 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
quota_res->sectors -= sectors;
inode->ei_quota_reserved -= sectors;
} else {
bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);
bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
}
#endif
inode->v.i_blocks += sectors;
......@@ -813,7 +813,7 @@ static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
struct bvec_iter iter;
struct bio_vec bv;
unsigned nr_ptrs = !bch2_extent_is_compressed(k)
? bch2_extent_nr_dirty_ptrs(k)
? bch2_bkey_nr_dirty_ptrs(k)
: 0;
bio_for_each_segment(bv, bio, iter) {
......@@ -2397,7 +2397,7 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
......@@ -2504,7 +2504,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
goto btree_iter_err;
/* already reserved */
if (k.k->type == BCH_RESERVATION &&
if (k.k->type == KEY_TYPE_reservation &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
bch2_btree_iter_next_slot(iter);
continue;
......@@ -2517,7 +2517,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
}
bkey_reservation_init(&reservation.k_i);
reservation.k.type = BCH_RESERVATION;
reservation.k.type = KEY_TYPE_reservation;
reservation.k.p = k.k->p;
reservation.k.size = k.k->size;
......@@ -2525,7 +2525,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
bch2_cut_back(end_pos, &reservation.k);
sectors = reservation.k.size;
reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);
if (!bkey_extent_is_allocation(k.k)) {
ret = bch2_quota_reservation_add(c, inode,
......
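The quota accounting modes are swept up in the same rename, BCH_QUOTA_* becoming KEY_TYPE_QUOTA_*; the change at call sites is purely mechanical, e.g. the reservation path above:

ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
                      check_enospc ? KEY_TYPE_QUOTA_PREALLOC
                                   : KEY_TYPE_QUOTA_NOCHECK);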
......@@ -340,7 +340,7 @@ __bch2_create(struct mnt_idmap *idmap,
if (tmpfile)
inode_u.bi_flags |= BCH_INODE_UNLINKED;
ret = bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1, BCH_QUOTA_PREALLOC);
ret = bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1, KEY_TYPE_QUOTA_PREALLOC);
if (ret)
return ERR_PTR(ret);
......@@ -457,7 +457,7 @@ __bch2_create(struct mnt_idmap *idmap,
make_bad_inode(&inode->v);
iput(&inode->v);
err:
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, BCH_QUOTA_WARN);
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, KEY_TYPE_QUOTA_WARN);
inode = ERR_PTR(ret);
goto out;
}
......@@ -1079,7 +1079,7 @@ static int bch2_fill_extent(struct fiemap_extent_info *info,
}
return 0;
} else if (k->k.type == BCH_RESERVATION) {
} else if (k->k.type == KEY_TYPE_reservation) {
return fiemap_fill_next_extent(info,
bkey_start_offset(&k->k) << 9,
0, k->k.size << 9,
......@@ -1112,7 +1112,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
POS(ei->v.i_ino, start >> 9), 0, k)
if (bkey_extent_is_data(k.k) ||
k.k->type == BCH_RESERVATION) {
k.k->type == KEY_TYPE_reservation) {
if (bkey_cmp(bkey_start_pos(k.k),
POS(ei->v.i_ino, (start + len) >> 9)) >= 0)
break;
......@@ -1414,9 +1414,9 @@ static void bch2_evict_inode(struct inode *vinode)
if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
BCH_QUOTA_WARN);
KEY_TYPE_QUOTA_WARN);
bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
BCH_QUOTA_WARN);
KEY_TYPE_QUOTA_WARN);
bch2_inode_rm(c, inode->v.i_ino);
WARN_ONCE(atomic_long_dec_return(&c->nr_inodes) < 0,
......
......@@ -235,7 +235,6 @@ static int hash_check_duplicates(const struct bch_hash_desc desc,
!desc.cmp_bkey(k, k2), c,
"duplicate hash table keys:\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c,
bkey_type(0, desc.btree_id),
k), buf))) {
ret = fsck_hash_delete_at(desc, &h->info, k_iter);
if (ret)
......@@ -255,7 +254,7 @@ static bool key_has_correct_hash(const struct bch_hash_desc desc,
{
u64 hash;
if (k.k->type != desc.whiteout_type &&
if (k.k->type != KEY_TYPE_whiteout &&
k.k->type != desc.key_type)
return true;
......@@ -280,7 +279,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
u64 hashed;
int ret = 0;
if (k.k->type != desc.whiteout_type &&
if (k.k->type != KEY_TYPE_whiteout &&
k.k->type != desc.key_type)
return 0;
......@@ -300,7 +299,6 @@ static int hash_check_key(const struct bch_hash_desc desc,
desc.btree_id, k.k->p.offset,
hashed, h->chain->pos.offset,
(bch2_bkey_val_to_text(&PBUF(buf), c,
bkey_type(0, desc.btree_id),
k), buf))) {
ret = hash_redo_key(desc, h, c, k_iter, k, hashed);
if (ret) {
......@@ -370,7 +368,7 @@ static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
*k = bch2_btree_iter_peek(iter);
BUG_ON(k->k->type != BCH_DIRENT);
BUG_ON(k->k->type != KEY_TYPE_dirent);
}
err:
fsck_err:
......@@ -385,7 +383,6 @@ static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
buf, strlen(buf), BTREE_ID_DIRENTS,
k->k->p.offset, hash, h->chain->pos.offset,
(bch2_bkey_val_to_text(&PBUF(buf), c,
bkey_type(0, BTREE_ID_DIRENTS),
*k), buf))) {
ret = hash_redo_key(bch2_dirent_hash_desc,
h, c, iter, *k, hash);
......@@ -471,7 +468,7 @@ static int check_extents(struct bch_fs *c)
if (fsck_err_on(w.have_inode &&
!(w.inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
k.k->type != BCH_RESERVATION &&
k.k->type != KEY_TYPE_reservation &&
k.k->p.offset > round_up(w.inode.bi_size, PAGE_SIZE) >> 9, c,
"extent type %u offset %llu past end of inode %llu, i_size %llu",
k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size)) {
......@@ -529,13 +526,11 @@ static int check_dirents(struct bch_fs *c)
if (fsck_err_on(!w.have_inode, c,
"dirent in nonexisting directory:\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c,
(enum bkey_type) BTREE_ID_DIRENTS,
k), buf)) ||
fsck_err_on(!S_ISDIR(w.inode.bi_mode), c,
"dirent in non directory inode type %u:\n%s",
mode_to_type(w.inode.bi_mode),
(bch2_bkey_val_to_text(&PBUF(buf), c,
(enum bkey_type) BTREE_ID_DIRENTS,
k), buf))) {
ret = bch2_btree_delete_at(iter, 0);
if (ret)
......@@ -557,7 +552,7 @@ static int check_dirents(struct bch_fs *c)
if (ret)
goto fsck_err;
if (k.k->type != BCH_DIRENT)
if (k.k->type != KEY_TYPE_dirent)
continue;
d = bkey_s_c_to_dirent(k);
......@@ -586,7 +581,6 @@ static int check_dirents(struct bch_fs *c)
if (fsck_err_on(d_inum == d.k->p.inode, c,
"dirent points to own directory:\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c,
(enum bkey_type) BTREE_ID_DIRENTS,
k), buf))) {
ret = remove_dirent(c, iter, d);
if (ret)
......@@ -604,7 +598,6 @@ static int check_dirents(struct bch_fs *c)
if (fsck_err_on(!have_target, c,
"dirent points to missing inode:\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c,
(enum bkey_type) BTREE_ID_DIRENTS,
k), buf))) {
ret = remove_dirent(c, iter, d);
if (ret)
......@@ -618,7 +611,6 @@ static int check_dirents(struct bch_fs *c)
"incorrect d_type: should be %u:\n%s",
mode_to_type(target.bi_mode),
(bch2_bkey_val_to_text(&PBUF(buf), c,
(enum bkey_type) BTREE_ID_DIRENTS,
k), buf))) {
struct bkey_i_dirent *n;
......@@ -899,7 +891,7 @@ static int check_directory_structure(struct bch_fs *c,
e->offset = k.k->p.offset;
if (k.k->type != BCH_DIRENT)
if (k.k->type != KEY_TYPE_dirent)
continue;
dirent = bkey_s_c_to_dirent(k);
......@@ -942,7 +934,7 @@ static int check_directory_structure(struct bch_fs *c,
}
for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
if (k.k->type != BCH_INODE_FS)
if (k.k->type != KEY_TYPE_inode)
continue;
if (!S_ISDIR(le16_to_cpu(bkey_s_c_to_inode(k).v->bi_mode)))
......@@ -1030,7 +1022,7 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k) {
switch (k.k->type) {
case BCH_DIRENT:
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
d_inum = le64_to_cpu(d.v->d_inum);
......@@ -1310,7 +1302,7 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
if (iter.pos.inode < nlinks_pos || !link)
link = &zero_links;
if (k.k && k.k->type == BCH_INODE_FS) {
if (k.k && k.k->type == KEY_TYPE_inode) {
/*
* Avoid potential deadlocks with iter for
* truncate/rm/etc.:
......@@ -1392,7 +1384,7 @@ static int check_inodes_fast(struct bch_fs *c)
int ret = 0;
for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
if (k.k->type != BCH_INODE_FS)
if (k.k->type != KEY_TYPE_inode)
continue;
inode = bkey_s_c_to_inode(k);
......
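fsck's hash checks previously compared against a per-descriptor desc.whiteout_type; with whiteouts now the single global KEY_TYPE_whiteout, the descriptor keeps only its live key type. The resulting guard, shared by key_has_correct_hash() and hash_check_key() above:

/* a key belongs to this hash table if it is live or a whiteout */
if (k.k->type != KEY_TYPE_whiteout &&
    k.k->type != desc.key_type)
        return true;    /* not ours, nothing to verify */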
......@@ -178,76 +178,69 @@ int bch2_inode_unpack(struct bkey_s_c_inode inode,
const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
if (k.k->p.offset)
return "nonzero offset";
switch (k.k->type) {
case BCH_INODE_FS: {
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked unpacked;
if (bkey_val_bytes(k.k) < sizeof(struct bch_inode))
return "incorrect value size";
if (k.k->p.inode < BLOCKDEV_INODE_MAX)
return "fs inode in blockdev range";
if (k.k->p.offset)
return "nonzero offset";
if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR)
return "invalid str hash type";
if (bkey_val_bytes(k.k) < sizeof(struct bch_inode))
return "incorrect value size";
if (bch2_inode_unpack(inode, &unpacked))
return "invalid variable length fields";
if (k.k->p.inode < BLOCKDEV_INODE_MAX)
return "fs inode in blockdev range";
if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1)
return "invalid data checksum type";
if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR)
return "invalid str hash type";
if (unpacked.bi_compression >= BCH_COMPRESSION_OPT_NR + 1)
return "invalid compression type";
if (bch2_inode_unpack(inode, &unpacked))
return "invalid variable length fields";
if ((unpacked.bi_flags & BCH_INODE_UNLINKED) &&
unpacked.bi_nlink != 0)
return "flagged as unlinked but bi_nlink != 0";
if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1)
return "invalid data checksum type";
return NULL;
}
case BCH_INODE_BLOCKDEV:
if (bkey_val_bytes(k.k) != sizeof(struct bch_inode_blockdev))
return "incorrect value size";
if (unpacked.bi_compression >= BCH_COMPRESSION_OPT_NR + 1)
return "invalid compression type";
if (k.k->p.inode >= BLOCKDEV_INODE_MAX)
return "blockdev inode in fs range";
if ((unpacked.bi_flags & BCH_INODE_UNLINKED) &&
unpacked.bi_nlink != 0)
return "flagged as unlinked but bi_nlink != 0";
return NULL;
case BCH_INODE_GENERATION:
if (bkey_val_bytes(k.k) != sizeof(struct bch_inode_generation))
return "incorrect value size";
return NULL;
default:
return "invalid type";
}
return NULL;
}
void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_s_c_inode inode;
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked unpacked;
switch (k.k->type) {
case BCH_INODE_FS:
inode = bkey_s_c_to_inode(k);
if (bch2_inode_unpack(inode, &unpacked)) {
pr_buf(out, "(unpack error)");
break;
}
if (bch2_inode_unpack(inode, &unpacked)) {
pr_buf(out, "(unpack error)");
return;
}
#define BCH_INODE_FIELD(_name, _bits) \
pr_buf(out, #_name ": %llu ", (u64) unpacked._name);
BCH_INODE_FIELDS()
pr_buf(out, #_name ": %llu ", (u64) unpacked._name);
BCH_INODE_FIELDS()
#undef BCH_INODE_FIELD
break;
}
}
const char *bch2_inode_generation_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
if (k.k->p.offset)
return "nonzero offset";
if (bkey_val_bytes(k.k) != sizeof(struct bch_inode_generation))
return "incorrect value size";
return NULL;
}
void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
}
void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
......@@ -281,10 +274,9 @@ void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
static inline u32 bkey_generation(struct bkey_s_c k)
{
switch (k.k->type) {
case BCH_INODE_BLOCKDEV:
case BCH_INODE_FS:
case KEY_TYPE_inode:
BUG();
case BCH_INODE_GENERATION:
case KEY_TYPE_inode_generation:
return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
default:
return 0;
......@@ -330,8 +322,7 @@ int __bch2_inode_create(struct btree_trans *trans,
return ret;
switch (k.k->type) {
case BCH_INODE_BLOCKDEV:
case BCH_INODE_FS:
case KEY_TYPE_inode:
/* slot used */
if (iter->pos.inode >= max)
goto out;
......@@ -405,19 +396,19 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
return ret;
}
bch2_fs_inconsistent_on(k.k->type != BCH_INODE_FS, c,
bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_inode, c,
"inode %llu not found when deleting",
inode_nr);
switch (k.k->type) {
case BCH_INODE_FS: {
case KEY_TYPE_inode: {
struct bch_inode_unpacked inode_u;
if (!bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u))
bi_generation = inode_u.bi_generation + 1;
break;
}
case BCH_INODE_GENERATION: {
case KEY_TYPE_inode_generation: {
struct bkey_s_c_inode_generation g =
bkey_s_c_to_inode_generation(k);
bi_generation = le32_to_cpu(g.v->bi_generation);
......@@ -455,7 +446,7 @@ int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
POS(inode_nr, 0),
BTREE_ITER_SLOTS, k) {
switch (k.k->type) {
case BCH_INODE_FS:
case KEY_TYPE_inode:
ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
break;
default:
......@@ -464,7 +455,6 @@ int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
}
break;
}
return bch2_btree_iter_unlock(&iter) ?: ret;
......
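bch2_inode_invalid() now handles only KEY_TYPE_inode: blockdev inodes are gone, and generation keys get their own bch2_inode_generation_invalid()/_to_text() pair as shown above. Each type then presumably gets its own ops entry in inode.h, mirroring the pattern already visible for alloc and dirent:

#define bch2_bkey_ops_inode (struct bkey_ops) {                 \
        .key_invalid    = bch2_inode_invalid,                   \
        .val_to_text    = bch2_inode_to_text,                   \
}

#define bch2_bkey_ops_inode_generation (struct bkey_ops) {      \
        .key_invalid    = bch2_inode_generation_invalid,        \
        .val_to_text    = bch2_inode_generation_to_text,        \
}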