Commit 34dfa5db authored by Kent Overstreet

bcachefs: bch2_bkey_get_mut() improvements

 - bch2_bkey_get_mut() now handles types increasing in size, allocating
   a buffer for the type's current size when necessary
 - bch2_bkey_make_mut_typed()
 - bch2_bkey_get_mut() now initializes the iterator, like
   bch2_bkey_get_iter()

Also, refactor so that most of the code is in functions - now macros are
only used for wrappers.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent d67a16df
......@@ -511,18 +511,8 @@ static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_
if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
((a = bkey_s_c_to_alloc_v4(k), true) &&
BCH_ALLOC_V4_BACKPOINTERS_START(a.v) == BCH_ALLOC_V4_U64s &&
BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0)) {
/*
* Reserve space for one more backpointer here:
* Not sketchy at doing it this way, nope...
*/
struct bkey_i_alloc_v4 *ret =
bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
if (!IS_ERR(ret))
bkey_reassemble(&ret->k_i, k);
return ret;
}
BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
return bch2_bkey_make_mut_typed(trans, k, alloc_v4);
return __bch2_alloc_to_v4_mut(trans, k);
}
......
......@@ -183,47 +183,90 @@ static inline void bch2_trans_reset_updates(struct btree_trans *trans)
}
}
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k,
unsigned type, unsigned min_bytes)
{
struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));
unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
struct bkey_i *mut;
if (type && k.k->type != type)
return ERR_PTR(-ENOENT);
if (!IS_ERR(mut))
mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
if (!IS_ERR(mut)) {
bkey_reassemble(mut, k);
if (unlikely(bytes > bkey_bytes(k.k))) {
memset((void *) mut + bkey_bytes(k.k), 0,
bytes - bkey_bytes(k.k));
mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
}
}
return mut;
}
static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
struct btree_iter *iter)
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
{
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
return __bch2_bkey_make_mut(trans, k, 0, 0);
}
return unlikely(IS_ERR(k.k))
#define bch2_bkey_make_mut_typed(_trans, _k, _type) \
bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _k, \
KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned flags, unsigned type, unsigned min_bytes)
{
struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
btree_id, pos, flags|BTREE_ITER_INTENT, type);
struct bkey_i *ret = unlikely(IS_ERR(k.k))
? ERR_CAST(k.k)
: bch2_bkey_make_mut(trans, k);
: __bch2_bkey_make_mut(trans, k, 0, min_bytes);
if (unlikely(IS_ERR(ret)))
bch2_trans_iter_exit(trans, iter);
return ret;
}
#define bch2_bkey_get_mut_typed(_trans, _iter, _type) \
({ \
struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter); \
struct bkey_i_##_type *_ret; \
\
if (IS_ERR(_k)) \
_ret = ERR_CAST(_k); \
else if (unlikely(_k->k.type != KEY_TYPE_##_type)) \
_ret = ERR_PTR(-ENOENT); \
else \
_ret = bkey_i_to_##_type(_k); \
_ret; \
})
/*
 * Look up the key at @pos in @btree_id and return a mutable copy of it,
 * allocated from the transaction, sized to at least @min_bytes.
 *
 * No key-type filtering is done (type == 0 means "accept any type").
 * On error the iterator is exited by __bch2_bkey_get_mut(); on success
 * the caller owns @iter and must exit it.
 */
static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
						       struct btree_iter *iter,
						       unsigned btree_id, struct bpos pos,
						       unsigned flags, unsigned min_bytes)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos,
				   flags, 0, min_bytes);
}
/*
 * Look up the key at @pos in @btree_id and return a mutable copy of it,
 * allocated from the transaction.
 *
 * Convenience wrapper: no key-type filter and no minimum-size request.
 * On error the iterator is exited for the caller; on success the caller
 * owns @iter and must exit it.
 */
static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned flags)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos,
				   flags, 0, 0);
}
#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter, \
_btree_id, _pos, _flags, \
KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
/*
 * Allocate a fresh key of @type with @val_size value bytes from the
 * transaction's memory pool, positioned at the iterator's current pos.
 *
 * Returns an ERR_PTR on allocation failure; otherwise a zero-initialized
 * key (bch2_trans_kmalloc zeroes) ready for the caller to fill in.
 */
static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
					       unsigned type, unsigned val_size)
{
	struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new) + val_size);

	if (IS_ERR(new))
		return new;

	bkey_init(&new->k);
	new->k.p	= iter->pos;
	new->k.type	= type;
	set_bkey_val_bytes(&new->k, val_size);

	return new;
}
#define bch2_bkey_alloc(_trans, _iter, _type) \
({ \
struct bkey_i_##_type *_k = bch2_trans_kmalloc_nomemzero(_trans, sizeof(*_k));\
if (!IS_ERR(_k)) { \
bkey_##_type##_init(&_k->k_i); \
_k->k.p = (_iter)->pos; \
} \
_k; \
})
bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, \
KEY_TYPE_##_type, sizeof(struct bch_##_type)))
#endif /* _BCACHEFS_BTREE_UPDATE_H */
......@@ -1448,10 +1448,9 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
struct bch_replicas_padded r;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES);
s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
s = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_stripes, POS(0, p.ec.idx),
BTREE_ITER_WITH_UPDATES, stripe);
ret = PTR_ERR_OR_ZERO(s);
if (unlikely(ret)) {
bch2_trans_inconsistent_on(ret == -ENOENT, trans,
......@@ -1750,10 +1749,9 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES);
k = bch2_bkey_get_mut(trans, &iter);
k = bch2_bkey_get_mut(trans, &iter,
BTREE_ID_reflink, POS(0, *idx),
BTREE_ITER_WITH_UPDATES);
ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;
......
......@@ -257,15 +257,14 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
SPOS(0,
extent_iter->pos.inode,
extent_iter->snapshot),
BTREE_ITER_INTENT|BTREE_ITER_CACHED);
k = bch2_bkey_get_mut(trans, &iter);
k = bch2_bkey_get_mut(trans, &iter, BTREE_ID_inodes,
SPOS(0,
extent_iter->pos.inode,
extent_iter->snapshot),
BTREE_ITER_CACHED);
ret = PTR_ERR_OR_ZERO(k);
if (unlikely(ret))
goto err;
return ret;
if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
k = bch2_inode_to_v3(trans, k);
......
......@@ -363,9 +363,9 @@ static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
struct bkey_i_snapshot *s;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
BTREE_ITER_INTENT);
s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
s = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_snapshots, POS(0, id),
0, snapshot);
ret = PTR_ERR_OR_ZERO(s);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
......@@ -411,10 +411,9 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
if (parent_id) {
struct bkey_i_snapshot *parent;
bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
POS(0, parent_id),
BTREE_ITER_INTENT);
parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
parent = bch2_bkey_get_mut_typed(trans, &p_iter,
BTREE_ID_snapshots, POS(0, parent_id),
0, snapshot);
ret = PTR_ERR_OR_ZERO(parent);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(ret == -ENOENT, c, "missing snapshot %u", parent_id);
......@@ -453,7 +452,7 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
u32 *snapshot_subvols,
unsigned nr_snapids)
{
struct btree_iter iter;
struct btree_iter iter, parent_iter = { NULL };
struct bkey_i_snapshot *n;
struct bkey_s_c k;
unsigned i;
......@@ -498,8 +497,9 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
}
if (parent) {
bch2_btree_iter_set_pos(&iter, POS(0, parent));
n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
n = bch2_bkey_get_mut_typed(trans, &parent_iter,
BTREE_ID_snapshots, POS(0, parent),
0, snapshot);
ret = PTR_ERR_OR_ZERO(n);
if (unlikely(ret)) {
if (ret == -ENOENT)
......@@ -517,11 +517,12 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
n->v.children[1] = cpu_to_le32(new_snapids[1]);
n->v.subvol = 0;
SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
ret = bch2_trans_update(trans, &parent_iter, &n->k_i, 0);
if (ret)
goto err;
}
err:
bch2_trans_iter_exit(trans, &parent_iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......@@ -888,11 +889,9 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
struct subvolume_unlink_hook *h;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
POS(0, subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
n = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvolid),
BTREE_ITER_CACHED, subvolume);
ret = PTR_ERR_OR_ZERO(n);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
......@@ -956,11 +955,9 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
if (src_subvolid) {
/* Creating a snapshot: */
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
POS(0, src_subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
BTREE_ID_subvolumes, POS(0, src_subvolid),
BTREE_ITER_CACHED, subvolume);
ret = PTR_ERR_OR_ZERO(src_subvol);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment