Commit fc3268c1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: kill extent_insert_hook

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 190fa7af
......@@ -322,7 +322,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_set_acl_fn,
(void *)(unsigned long) mode) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
......
......@@ -319,7 +319,7 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
bch2_btree_iter_set_pos(iter, a->k.p);
return bch2_btree_insert_at(c, NULL, NULL, journal_seq,
return bch2_btree_insert_at(c, NULL, journal_seq,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_USE_ALLOC_RESERVE|
......
......@@ -975,8 +975,6 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
if (__bch2_btree_iter_relock(iter))
return 0;
iter->flags &= ~BTREE_ITER_AT_END_OF_LEAF;
/*
* XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
* here unnecessary
......@@ -1155,10 +1153,8 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_
iter->flags & BTREE_ITER_IS_EXTENTS))
__btree_iter_advance(l);
if (!k && btree_iter_pos_after_node(iter, l->b)) {
if (!k && btree_iter_pos_after_node(iter, l->b))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
}
}
void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
......
......@@ -196,11 +196,7 @@ enum btree_iter_type {
* @pos or the first key strictly greater than @pos
*/
#define BTREE_ITER_IS_EXTENTS (1 << 4)
/*
* indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
*/
#define BTREE_ITER_AT_END_OF_LEAF (1 << 5)
#define BTREE_ITER_ERROR (1 << 6)
#define BTREE_ITER_ERROR (1 << 5)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
......@@ -256,12 +252,6 @@ struct btree_iter {
struct btree_insert_entry {
struct btree_iter *iter;
struct bkey_i *k;
unsigned extra_res;
/*
* true if entire key was inserted - can only be false for
* extents
*/
bool done;
};
struct btree_trans {
......@@ -467,12 +457,6 @@ enum btree_insert_ret {
BTREE_INSERT_NEED_GC_LOCK,
};
struct extent_insert_hook {
enum btree_insert_ret
(*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
struct bkey_s_c, const struct bkey_i *);
};
enum btree_gc_coalesce_fail_reason {
BTREE_GC_COALESCE_FAIL_RESERVE_GET,
BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
......
......@@ -23,7 +23,6 @@ struct btree_insert {
struct disk_reservation *disk_res;
struct journal_res journal_res;
u64 *journal_seq;
struct extent_insert_hook *hook;
unsigned flags;
bool did_work;
......@@ -37,15 +36,6 @@ int __bch2_btree_insert_at(struct btree_insert *);
((struct btree_insert_entry) { \
.iter = (_iter), \
.k = (_k), \
.done = false, \
})
#define BTREE_INSERT_ENTRY_EXTRA_RES(_iter, _k, _extra) \
((struct btree_insert_entry) { \
.iter = (_iter), \
.k = (_k), \
.extra_res = (_extra), \
.done = false, \
})
/**
......@@ -61,13 +51,11 @@ int __bch2_btree_insert_at(struct btree_insert *);
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
#define bch2_btree_insert_at(_c, _disk_res, _hook, \
_journal_seq, _flags, ...) \
#define bch2_btree_insert_at(_c, _disk_res, _journal_seq, _flags, ...) \
__bch2_btree_insert_at(&(struct btree_insert) { \
.c = (_c), \
.disk_res = (_disk_res), \
.journal_seq = (_journal_seq), \
.hook = (_hook), \
.flags = (_flags), \
.nr = COUNT_ARGS(__VA_ARGS__), \
.entries = (struct btree_insert_entry[]) { \
......@@ -121,17 +109,13 @@ enum {
int bch2_btree_delete_at(struct btree_iter *, unsigned);
int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *,
struct disk_reservation *,
struct extent_insert_hook *, u64 *, unsigned);
struct disk_reservation *, u64 *, unsigned);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
struct disk_reservation *,
struct extent_insert_hook *, u64 *, int flags);
struct disk_reservation *, u64 *, int flags);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
struct bpos, struct bpos, struct bversion,
struct disk_reservation *,
struct extent_insert_hook *, u64 *);
struct bpos, struct bpos, u64 *);
int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
__le64, unsigned);
......@@ -151,7 +135,6 @@ bch2_trans_update(struct btree_trans *trans,
int bch2_trans_commit(struct btree_trans *,
struct disk_reservation *,
struct extent_insert_hook *,
u64 *, unsigned);
#define bch2_trans_do(_c, _journal_seq, _flags, _do) \
......@@ -164,7 +147,7 @@ int bch2_trans_commit(struct btree_trans *,
do { \
bch2_trans_begin(&trans); \
\
_ret = (_do) ?: bch2_trans_commit(&trans, NULL, NULL, \
_ret = (_do) ?: bch2_trans_commit(&trans, NULL, \
(_journal_seq), (_flags)); \
} while (_ret == -EINTR); \
\
......
......@@ -161,15 +161,6 @@ static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
{
struct btree *b;
/*
* iterators are inconsistent when they hit end of leaf, until
* traversed again
*
* XXX inconsistent how?
*/
if (iter->flags & BTREE_ITER_AT_END_OF_LEAF)
return;
if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
return;
......
......@@ -336,14 +336,12 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
unsigned u64s;
int ret;
trans_for_each_entry(trans, i) {
BUG_ON(i->done);
trans_for_each_entry(trans, i)
BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
}
u64s = 0;
trans_for_each_entry(trans, i)
u64s += jset_u64s(i->k->k.u64s + i->extra_res);
u64s += jset_u64s(i->k->k.u64s);
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
......@@ -374,7 +372,7 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
if (!same_leaf_as_prev(trans, i))
u64s = 0;
u64s += i->k->k.u64s + i->extra_res;
u64s += i->k->k.u64s;
switch (btree_key_can_insert(trans, i, &u64s)) {
case BTREE_INSERT_OK:
break;
......@@ -406,28 +404,14 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
trans_for_each_entry(trans, i) {
switch (btree_insert_key_leaf(trans, i)) {
case BTREE_INSERT_OK:
i->done = true;
break;
case BTREE_INSERT_NEED_TRAVERSE:
BUG_ON((trans->flags & BTREE_INSERT_ATOMIC));
ret = -EINTR;
break;
case BTREE_INSERT_BTREE_NODE_FULL:
ret = -EINTR;
*split = i->iter;
break;
case BTREE_INSERT_ENOSPC:
ret = -ENOSPC;
break;
goto out;
default:
BUG();
}
/*
* If we did some work (i.e. inserted part of an extent),
* we have to do all the other updates as well:
*/
if (!trans->did_work && (ret || *split))
break;
}
out:
multi_unlock_write(trans);
......@@ -523,11 +507,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
trans->did_work &&
!btree_node_locked(linked, 0));
}
/* make sure we didn't lose an error: */
if (!ret)
trans_for_each_entry(trans, i)
BUG_ON(!i->done);
}
BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
......@@ -614,7 +593,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
int bch2_trans_commit(struct btree_trans *trans,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq,
unsigned flags)
{
......@@ -642,7 +620,7 @@ int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
bkey_init(&k.k);
k.k.p = iter->pos;
return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
return bch2_btree_insert_at(iter->c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|flags,
BTREE_INSERT_ENTRY(iter, &k));
......@@ -651,7 +629,6 @@ int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
int bch2_btree_insert_list_at(struct btree_iter *iter,
struct keylist *keys,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq, unsigned flags)
{
BUG_ON(flags & BTREE_INSERT_ATOMIC);
......@@ -659,7 +636,7 @@ int bch2_btree_insert_list_at(struct btree_iter *iter,
bch2_verify_keylist_sorted(keys);
while (!bch2_keylist_empty(keys)) {
int ret = bch2_btree_insert_at(iter->c, disk_res, hook,
int ret = bch2_btree_insert_at(iter->c, disk_res,
journal_seq, flags,
BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
if (ret)
......@@ -681,7 +658,6 @@ int bch2_btree_insert_list_at(struct btree_iter *iter,
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
struct bkey_i *k,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq, int flags)
{
struct btree_iter iter;
......@@ -689,7 +665,7 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
BTREE_INSERT_ENTRY(&iter, k));
bch2_btree_iter_unlock(&iter);
......@@ -702,12 +678,8 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
* Range is a half open interval - [start, end)
*/
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
struct bpos start,
struct bpos end,
struct bversion version,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq)
struct bpos start, struct bpos end,
u64 *journal_seq)
{
struct btree_iter iter;
struct bkey_s_c k;
......@@ -717,14 +689,12 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
BTREE_ITER_INTENT);
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = btree_iter_err(k))) {
!(ret = btree_iter_err(k)) &&
bkey_cmp(iter.pos, end) < 0) {
unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
/* really shouldn't be using a bare, unpadded bkey_i */
struct bkey_i delete;
if (bkey_cmp(iter.pos, end) >= 0)
break;
bkey_init(&delete.k);
/*
......@@ -738,7 +708,6 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
* bkey_start_pos(k.k)).
*/
delete.k.p = iter.pos;
delete.k.version = version;
if (iter.flags & BTREE_ITER_IS_EXTENTS) {
/* create the biggest key we can */
......@@ -746,7 +715,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
bch2_cut_back(end, &delete.k);
}
ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
ret = bch2_btree_insert_at(c, NULL, journal_seq,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &delete));
if (ret)
......
......@@ -1252,52 +1252,6 @@ static void extent_insert_committed(struct extent_insert_state *s)
s->trans->did_work = true;
}
/*
 * Advance s->committed to @next_pos, first giving the transaction's
 * extent_insert_hook (if any) a chance to veto or interpose.
 *
 * @s:        extent insert state being advanced
 * @next_pos: position to advance s->committed to
 * @k:        the existing key being passed over (bkey_s_c_null for a hole)
 *
 * Returns the hook's verdict; s->committed is only updated on
 * BTREE_INSERT_OK. When no hook is registered the advance always succeeds.
 */
static enum btree_insert_ret
__extent_insert_advance_pos(struct extent_insert_state *s,
struct bpos next_pos,
struct bkey_s_c k)
{
struct extent_insert_hook *hook = s->trans->hook;
enum btree_insert_ret ret;
/* Let the hook observe/veto the range [s->committed, next_pos): */
if (hook)
ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
else
ret = BTREE_INSERT_OK;
if (ret == BTREE_INSERT_OK)
s->committed = next_pos;
return ret;
}
/*
 * Update s->committed, marking how much of @insert we've processed, and call
 * the hook fn (via __extent_insert_advance_pos) for each distinct range.
 *
 * The target position is the smaller of the insert's end position and the end
 * of @k (or the end of the btree node when @k is absent). If there is a hole
 * between s->committed and the start of @k, the hook is first called for the
 * hole with a null key, so it sees every range exactly once.
 *
 * Returns BTREE_INSERT_OK on success, or the hook's error verdict.
 */
static enum btree_insert_ret
extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
{
struct btree *b = s->insert->iter->l[0].b;
struct bpos next_pos = bpos_min(s->insert->k->k.p,
k.k ? k.k->p : b->key.k.p);
enum btree_insert_ret ret;
/* hole before @k? advance over it first, passing a null key to the hook */
if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
ret = __extent_insert_advance_pos(s, bkey_start_pos(k.k),
bkey_s_c_null);
if (ret != BTREE_INSERT_OK)
return ret;
}
/* avoid redundant calls to hook fn: */
if (!bkey_cmp(s->committed, next_pos))
return BTREE_INSERT_OK;
return __extent_insert_advance_pos(s, next_pos, k);
}
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
struct btree *b = iter->l[0].b;
......@@ -1468,8 +1422,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
}
}
static enum btree_insert_ret
__bch2_insert_fixup_extent(struct extent_insert_state *s)
static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
{
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
......@@ -1477,7 +1430,6 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
enum btree_insert_ret ret = BTREE_INSERT_OK;
while (bkey_cmp(s->committed, insert->k.p) < 0 &&
(_k = bch2_btree_node_iter_peek_filter(&l->iter, b,
......@@ -1491,9 +1443,7 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
break;
ret = extent_insert_advance_pos(s, k.s_c);
if (ret)
break;
s->committed = bpos_min(s->insert->k->k.p, k.k->p);
if (!bkey_whiteout(k.k))
s->update_journal = true;
......@@ -1547,9 +1497,8 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
break;
}
if (ret == BTREE_INSERT_OK &&
bkey_cmp(s->committed, insert->k.p) < 0)
ret = extent_insert_advance_pos(s, bkey_s_c_null);
if (bkey_cmp(s->committed, insert->k.p) < 0)
s->committed = bpos_min(s->insert->k->k.p, b->key.k.p);
/*
* may have skipped past some deleted extents greater than the insert
......@@ -1563,8 +1512,6 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
bkey_cmp_left_packed(b, _k, &s->committed) > 0)
l->iter = node_iter;
}
return ret;
}
/**
......@@ -1610,16 +1557,13 @@ enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree_iter_level *l = &iter->l[0];
struct btree *b = l->b;
enum btree_insert_ret ret = BTREE_INSERT_OK;
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct extent_insert_state s = {
.trans = trans,
.insert = insert,
.committed = insert->iter->pos,
.committed = iter->pos,
.whiteout = *insert->k,
.update_journal = !bkey_whiteout(&insert->k->k),
......@@ -1644,7 +1588,7 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
bkey_start_offset(&insert->k->k),
insert->k->k.size);
ret = __bch2_insert_fixup_extent(&s);
__bch2_insert_fixup_extent(&s);
extent_insert_committed(&s);
......@@ -1653,16 +1597,14 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
EBUG_ON(bkey_cmp(iter->pos, s.committed));
EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
!!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
ret = BTREE_INSERT_NEED_TRAVERSE;
WARN_ONCE((ret == BTREE_INSERT_OK) != (insert->k->k.size == 0),
"ret %u insert->k.size %u", ret, insert->k->k.size);
if (insert->k->k.size) {
/* got to the end of this leaf node */
BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
return BTREE_INSERT_NEED_TRAVERSE;
}
return ret;
return BTREE_INSERT_OK;
}
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
......
......@@ -12,7 +12,6 @@ struct btree_node_iter;
struct btree_node_iter_large;
struct btree_insert;
struct btree_insert_entry;
struct extent_insert_hook;
struct bch_devs_mask;
union bch_extent_crc;
......
......@@ -326,7 +326,7 @@ static int bch2_extent_update(struct btree_trans *trans,
BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
}
ret = bch2_trans_commit(trans, disk_res, NULL,
ret = bch2_trans_commit(trans, disk_res,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_ATOMIC|
......
......@@ -229,7 +229,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
bch2_trans_begin(&trans);
ret = bch2_write_inode_trans(&trans, inode, &inode_u, set, p) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
......@@ -391,7 +391,7 @@ __bch2_create(struct mnt_idmap *idmap,
inode_update_for_create_fn,
&inode_u)
: 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
......@@ -535,7 +535,7 @@ static int __bch2_link(struct bch_fs *c,
bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_link_fn,
NULL) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
......@@ -622,7 +622,7 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_unlink_fn,
NULL) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&dir->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
......@@ -832,7 +832,7 @@ static int bch2_rename2(struct mnt_idmap *idmap,
? bch2_write_inode_trans(&trans, i.dst_inode, &dst_inode_u,
inode_update_for_rename_fn, &i)
: 0 ) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
......@@ -958,7 +958,7 @@ static int bch2_setattr_nonsize(struct mnt_idmap *idmap,
(iattr->ia_valid & ATTR_MODE
? bch2_acl_chmod(&trans, inode, iattr->ia_mode, &acl)
: 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
......
......@@ -73,8 +73,7 @@ static int reattach_inode(struct bch_fs *c,
bch2_inode_pack(&packed, lostfound_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL,
BTREE_INSERT_NOFAIL);
NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret) {
bch_err(c, "error %i reattaching inode %llu while updating lost+found",
ret, inum);
......@@ -202,7 +201,7 @@ static int fsck_hash_delete_at(const struct bch_hash_desc desc,
}
ret = bch2_hash_delete_at(&trans, desc, info, iter) ?:
bch2_trans_commit(&trans, NULL, NULL, NULL,
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL);
err:
......@@ -290,6 +289,13 @@ static int hash_check_key(const struct bch_hash_desc desc,
return ret;
}
/*
 * Delete all extents of @inode_nr from @new_size onwards.
 *
 * @new_size is in bytes; it is rounded up to a filesystem block boundary and
 * converted to 512-byte sectors for the btree position, so a partial final
 * block is preserved. The range is half-open, ending at the start of the
 * next inode number.
 *
 * Returns 0 on success or a negative error code from
 * bch2_btree_delete_range().
 */
static int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size)
{
return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
POS(inode_nr, round_up(new_size, block_bytes(c)) >> 9),
POS(inode_nr + 1, 0), NULL);
}
/*
* Walk extents: verify that extents have a corresponding S_ISREG inode, and
* that i_size an i_sectors are consistent
......@@ -320,7 +326,7 @@ static int check_extents(struct bch_fs *c)
k.k->type, k.k->p.inode, w.inode.bi_mode)) {
bch2_btree_iter_unlock(&iter);
ret = bch2_inode_truncate(c, k.k->p.inode, 0, NULL, NULL);
ret = bch2_inode_truncate(c, k.k->p.inode, 0);
if (ret)
goto err;
continue;
......@@ -342,10 +348,7 @@ static int check_extents(struct bch_fs *c)
bch2_inode_pack(&p, &w.inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES,
&p.inode.k_i,
NULL,
NULL,
NULL,
&p.inode.k_i, NULL, NULL,
BTREE_INSERT_NOFAIL);
if (ret) {
bch_err(c, "error in fs gc: error %i "
......@@ -366,8 +369,7 @@ static int check_extents(struct bch_fs *c)
bch2_btree_iter_unlock(&iter);
ret = bch2_inode_truncate(c, k.k->p.inode,
round_up(w.inode.bi_size, PAGE_SIZE) >> 9,
NULL, NULL);
w.inode.bi_size);
if (ret)
goto err;
continue;
......@@ -508,7 +510,7 @@ static int check_dirents(struct bch_fs *c)
bkey_reassemble(&n->k_i, d.s_c);
n->v.d_type = mode_to_type(target.bi_mode);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(iter, &n->k_i));
kfree(n);
......@@ -602,7 +604,7 @@ static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode)
bch2_inode_pack(&packed, root_inode);
return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
NULL, NULL, BTREE_INSERT_NOFAIL);
}
/* Get lost+found, create if it doesn't exist: */
......@@ -646,7 +648,7 @@ static int check_lostfound(struct bch_fs *c,
bch2_inode_pack(&packed, root_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret)
return ret;
......@@ -1094,9 +1096,7 @@ static int check_inode(struct bch_fs *c,
* just switch units to bytes and that issue goes away
*/
ret = bch2_inode_truncate(c, u.bi_inum,
round_up(u.bi_size, PAGE_SIZE) >> 9,
NULL, NULL);
ret = bch2_inode_truncate(c, u.bi_inum, u.bi_size);
if (ret) {
bch_err(c, "error in fs gc: error %i "
"truncating inode", ret);
......@@ -1142,7 +1142,7 @@ static int check_inode(struct bch_fs *c,
bch2_inode_pack(&p, &u);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
if (ret && ret != -EINTR)
......
......@@ -371,33 +371,14 @@ int bch2_inode_create(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
__bch2_inode_create(&trans, inode_u, min, max, hint));
}
int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size,
struct extent_insert_hook *hook, u64 *journal_seq)
{
return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
POS(inode_nr, new_size),
POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, hook,
journal_seq);
}
int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
{
struct btree_iter iter;
struct bkey_i_inode_generation delete;
struct bpos start = POS(inode_nr, 0);
struct bpos end = POS(inode_nr + 1, 0);
int ret;
ret = bch2_inode_truncate(c, inode_nr, 0, NULL, NULL);
if (ret < 0)
return ret;
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
if (ret < 0)
return ret;
/*
* If this was a directory, there shouldn't be any real dirents left -
* but there could be whiteouts (from hash collisions) that we should
......@@ -406,11 +387,13 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
* XXX: the dirent could ideally would delete whiteouts when they're no
* longer needed
*/
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
if (ret < 0)
ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
start, end, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_XATTRS,
start, end, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
start, end, NULL);
if (ret)
return ret;
bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inode_nr, 0),
......@@ -454,7 +437,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
delete.v.bi_generation = cpu_to_le32(bi_generation);
}
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &delete.k_i));
......
......@@ -46,8 +46,6 @@ int __bch2_inode_create(struct btree_trans *,
int bch2_inode_create(struct bch_fs *, struct bch_inode_unpacked *,
u64, u64, u64 *);
int bch2_inode_truncate(struct bch_fs *, u64, u64,
struct extent_insert_hook *, u64 *);
int bch2_inode_rm(struct bch_fs *, u64);
int bch2_inode_find_by_inum(struct bch_fs *, u64,
......
......@@ -302,7 +302,7 @@ int bch2_write_index_default(struct bch_write_op *op)
BTREE_ITER_INTENT);
ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
NULL, op_journal_seq(op),
op_journal_seq(op),
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE);
bch2_btree_iter_unlock(&iter);
......@@ -1403,7 +1403,7 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
if (!bch2_extent_narrow_crcs(e, new_crc))
goto out;
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOWAIT,
......
......@@ -904,7 +904,7 @@ int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
bch2_disk_reservation_init(c, 0);
ret = bch2_btree_insert(c, entry->btree_id, k,
&disk_res, NULL, NULL,
&disk_res, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_REPLAY);
}
......
......@@ -79,7 +79,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
iter.pos = bkey_start_pos(&tmp.key.k);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &tmp.key));
......
......@@ -158,7 +158,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
break;
ret = bch2_btree_insert_at(c, &op->res,
NULL, op_journal_seq(op),
op_journal_seq(op),
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
......
......@@ -541,7 +541,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_USR, 0),
POS(QTYP_USR + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
if (ret)
return ret;
}
......@@ -553,7 +553,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_GRP, 0),
POS(QTYP_GRP + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
if (ret)
return ret;
}
......@@ -565,7 +565,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_PRJ, 0),
POS(QTYP_PRJ + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
if (ret)
return ret;
}
......@@ -764,7 +764,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
bch2_btree_iter_unlock(&iter);
......
......@@ -331,7 +331,7 @@ int bch2_fs_initialize(struct bch_fs *c)
err = "error creating root directory";
ret = bch2_btree_insert(c, BTREE_ID_INODES,
&packed_inode.inode.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
if (ret)
goto err;
......@@ -344,7 +344,7 @@ int bch2_fs_initialize(struct bch_fs *c)
err = "error creating lost+found";
ret = bch2_btree_insert(c, BTREE_ID_INODES,
&packed_inode.inode.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
if (ret)
goto err;
......
......@@ -1246,8 +1246,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC,
POS(ca->dev_idx, 0),
POS(ca->dev_idx + 1, 0),
ZERO_VERSION,
NULL, NULL, NULL);
NULL);
if (ret) {
bch_err(ca, "Remove failed, error deleting alloc info");
goto err;
......
......@@ -15,12 +15,12 @@ static void delete_test_keys(struct bch_fs *c)
ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
POS(0, 0), POS(0, U64_MAX),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
BUG_ON(ret);
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(0, 0), POS(0, U64_MAX),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
BUG_ON(ret);
}
......@@ -40,7 +40,7 @@ static void test_delete(struct bch_fs *c, u64 nr)
ret = bch2_btree_iter_traverse(&iter);
BUG_ON(ret);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
......@@ -69,7 +69,7 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
ret = bch2_btree_iter_traverse(&iter);
BUG_ON(ret);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
......@@ -99,7 +99,7 @@ static void test_iterate(struct bch_fs *c, u64 nr)
k.k.p.offset = i;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
......@@ -141,7 +141,7 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
......@@ -186,7 +186,7 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
k.k.p.offset = i * 2;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
......@@ -236,7 +236,7 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
......@@ -289,7 +289,7 @@ static void insert_test_extent(struct bch_fs *c,
k.k_i.k.version.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
......@@ -352,7 +352,7 @@ static void rand_insert(struct bch_fs *c, u64 nr)
k.k.p.offset = test_rand();
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
}
......@@ -393,7 +393,7 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
k.k.p = iter.pos;
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
}
......@@ -414,7 +414,7 @@ static void rand_delete(struct bch_fs *c, u64 nr)
k.k.p.offset = test_rand();
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
NULL, NULL, NULL, 0);
NULL, NULL, 0);
BUG_ON(ret);
}
}
......@@ -433,7 +433,7 @@ static void seq_insert(struct bch_fs *c, u64 nr)
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
insert.k.p = iter.pos;
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &insert.k_i));
BUG_ON(ret);
......@@ -465,7 +465,7 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
bkey_reassemble(&u.k_i, k);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &u.k_i));
BUG_ON(ret);
}
......@@ -478,7 +478,7 @@ static void seq_delete(struct bch_fs *c, u64 nr)
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(0, 0), POS(0, U64_MAX),
ZERO_VERSION, NULL, NULL, NULL);
NULL);
BUG_ON(ret);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment