Commit e6ae2727 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Change inode allocation code for snapshots

For snapshots, when we allocate a new inode we want to allocate an inode
number that isn't in use in any other subvolume. We won't be able to use
ITER_SLOTS for this; inode allocation needs to change to use
BTREE_ITER_ALL_SNAPSHOTS.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent ab2a29cc
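
To make the new strategy concrete, here is a minimal sketch distilled from the diff below (not the patch itself: it omits the btree key cache checks, the ENOSPC/retry handling, and the found_slot re-validation that the real code performs). The inodes btree is scanned with BTREE_ITER_ALL_SNAPSHOTS so that keys from every snapshot are visible, and an inode number counts as free only when no key at that number exists in any snapshot:

	/*
	 * Sketch only: find the first inode number in [start, max) with no
	 * key in *any* snapshot.  Because the iterator was opened with
	 * BTREE_ITER_ALL_SNAPSHOTS, a gap between pos and the next key the
	 * iterator returns means that number is unused in every subvolume.
	 */
	u64 pos = start;
	struct btree_iter *iter;
	struct bkey_s_c k;

	iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, pos),
				   BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !bkey_err(k) &&
	       bkey_cmp(k.k->p, POS(0, max)) < 0) {
		if (pos < iter->pos.offset)
			goto found;	/* nothing at pos in any snapshot */

		/* keys exist at this number; skip past all of its snapshots */
		pos = iter->pos.offset + 1;
		bch2_btree_iter_set_pos(iter, POS(0, pos));
	}
	/* ran off the end: everything from pos up to max is free (or ENOSPC) */
found:
	/* claim pos - the real patch re-checks the slot and the key cache here */

The old BTREE_ITER_SLOTS iteration could only ask "is this slot empty in the snapshot the iterator is looking at?", which is no longer enough once the same inode number may be live in another subvolume.
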
@@ -36,7 +36,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
 	if (!name)
 		new_inode->bi_flags |= BCH_INODE_UNLINKED;
 
-	inode_iter = bch2_inode_create(trans, new_inode);
+	inode_iter = bch2_inode_create(trans, new_inode, U32_MAX);
 	ret = PTR_ERR_OR_ZERO(inode_iter);
 	if (ret)
 		goto err;
@@ -80,6 +80,10 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
 		new_inode->bi_dir_offset	= dir_offset;
 	}
 
+	/* XXX use bch2_btree_iter_set_snapshot() */
+	inode_iter->snapshot = U32_MAX;
+	bch2_btree_iter_set_pos(inode_iter, SPOS(0, new_inode->bi_inum, U32_MAX));
+
 	ret = bch2_inode_write(trans, inode_iter, new_inode);
 err:
 	bch2_trans_iter_put(trans, inode_iter);
@@ -471,12 +471,13 @@ static inline u32 bkey_generation(struct bkey_s_c k)
 }
 
 struct btree_iter *bch2_inode_create(struct btree_trans *trans,
-				     struct bch_inode_unpacked *inode_u)
+				     struct bch_inode_unpacked *inode_u,
+				     u32 snapshot)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = NULL;
 	struct bkey_s_c k;
-	u64 min, max, start, *hint;
+	u64 min, max, start, pos, *hint;
 	int ret;
 	u64 cpu = raw_smp_processor_id();
@@ -493,39 +494,70 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
 	if (start >= max || start < min)
 		start = min;
 
+	pos = start;
+	iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, pos),
+				   BTREE_ITER_ALL_SNAPSHOTS|
+				   BTREE_ITER_INTENT);
 again:
-	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, start),
-			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(iter->pos, POS(0, max)) > 0)
-			break;
+	while ((k = bch2_btree_iter_peek(iter)).k &&
+	       !(ret = bkey_err(k)) &&
+	       bkey_cmp(k.k->p, POS(0, max)) < 0) {
+		while (pos < iter->pos.offset) {
+			if (!bch2_btree_key_cache_find(c, BTREE_ID_inodes, POS(0, pos)))
+				goto found_slot;
+			pos++;
+		}
+
+		if (k.k->p.snapshot == snapshot &&
+		    k.k->type != KEY_TYPE_inode &&
+		    !bch2_btree_key_cache_find(c, BTREE_ID_inodes, SPOS(0, pos, snapshot))) {
+			bch2_btree_iter_next(iter);
+			continue;
+		}
 
 		/*
-		 * There's a potential cache coherency issue with the btree key
-		 * cache code here - we're iterating over the btree, skipping
-		 * that cache. We should never see an empty slot that isn't
-		 * actually empty due to a pending update in the key cache
-		 * because the update that creates the inode isn't done with a
-		 * cached iterator, but - better safe than sorry, check the
-		 * cache before using a slot:
+		 * We don't need to iterate over keys in every snapshot once
+		 * we've found just one:
 		 */
-		if (k.k->type != KEY_TYPE_inode &&
-		    !bch2_btree_key_cache_find(c, BTREE_ID_inodes, iter->pos))
+		pos = iter->pos.offset + 1;
+		bch2_btree_iter_set_pos(iter, POS(0, pos));
+	}
+
+	while (!ret && pos < max) {
+		if (!bch2_btree_key_cache_find(c, BTREE_ID_inodes, POS(0, pos)))
 			goto found_slot;
+		pos++;
 	}
 
-	bch2_trans_iter_put(trans, iter);
+	if (!ret && start == min)
+		ret = -ENOSPC;
 
-	if (ret)
+	if (ret) {
+		bch2_trans_iter_put(trans, iter);
 		return ERR_PTR(ret);
-
-	if (start != min) {
-		/* Retry from start */
-		start = min;
-		goto again;
 	}
 
-	return ERR_PTR(-ENOSPC);
+	/* Retry from start */
+	pos = start = min;
+	bch2_btree_iter_set_pos(iter, POS(0, pos));
+	goto again;
 found_slot:
+	bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
+	k = bch2_btree_iter_peek_slot(iter);
+	ret = bkey_err(k);
+	if (ret) {
+		bch2_trans_iter_put(trans, iter);
+		return ERR_PTR(ret);
+	}
+
+	/* We may have raced while the iterator wasn't pointing at pos: */
+	if (k.k->type == KEY_TYPE_inode ||
+	    bch2_btree_key_cache_find(c, BTREE_ID_inodes, k.k->p))
+		goto again;
+
 	*hint			= k.k->p.offset;
 	inode_u->bi_inum	= k.k->p.offset;
 	inode_u->bi_generation	= bkey_generation(k);
@@ -70,7 +70,7 @@ void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
 		     struct bch_inode_unpacked *);
 
 struct btree_iter *bch2_inode_create(struct btree_trans *,
-				     struct bch_inode_unpacked *);
+				     struct bch_inode_unpacked *, u32);
 
 int bch2_inode_rm(struct bch_fs *, u64, bool);
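
For completeness, a condensed caller-side view of the changed interface, mirroring the bch2_create_trans hunk above rather than adding anything new (error handling trimmed). The one caller updated in this commit passes U32_MAX as the snapshot argument:

	struct btree_iter *inode_iter;
	int ret;

	/* pick an inode number unused in every snapshot/subvolume: */
	inode_iter = bch2_inode_create(trans, new_inode, U32_MAX);
	ret = PTR_ERR_OR_ZERO(inode_iter);
	if (ret)
		goto err;

	/* position the iterator on (0, inum, U32_MAX) and write the inode
	 * (see the XXX about bch2_btree_iter_set_snapshot() above): */
	inode_iter->snapshot = U32_MAX;
	bch2_btree_iter_set_pos(inode_iter, SPOS(0, new_inode->bi_inum, U32_MAX));
	ret = bch2_inode_write(trans, inode_iter, new_inode);
err:
	bch2_trans_iter_put(trans, inode_iter);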