Commit 08af47df authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: convert bchfs_write_index_update() to bch2_extent_update()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e2d9912c
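
What this change does, in brief: the old write-index path updated i_size/i_sectors through an extent_insert_hook callback fired from inside the btree insert; the new path queues the extent update and any inode update in a btree transaction and commits both atomically via bch2_trans_commit(). A rough sketch of the new calling convention, inferred from the patch below (illustrative only, not committed code; c, inode, disk_res, quota_res, k, and new_i_size stand in for the caller's context):

	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_trans_init(&trans, c);
	bch2_trans_preload_iters(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&k->k), BTREE_ITER_INTENT);

	do {
		/*
		 * bch2_extent_update() resets pending updates, trims k
		 * to an atomic boundary, sums the sectors it overwrites,
		 * queues the extent and (if needed) inode update, and
		 * commits the transaction.
		 */
		ret = bch2_extent_update(&trans, inode, disk_res, quota_res,
					 iter, k, new_i_size,
					 true,	/* may_allocate */
					 false,	/* direct */
					 NULL);	/* total_delta */
	} while (ret == -EINTR);	/* transaction restart: retry */

	bch2_trans_exit(&trans);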
@@ -1040,7 +1040,6 @@ static inline void bch2_btree_iter_checks(struct btree_iter *iter,
 					  enum btree_iter_type type)
 {
 	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
-	EBUG_ON((iter->flags & BTREE_ITER_TYPE) != type);
 	EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
 		(iter->btree_id == BTREE_ID_EXTENTS &&
 		 type != BTREE_ITER_NODES));
@@ -1624,17 +1623,29 @@ static void btree_trans_verify(struct btree_trans *trans)
 	}
 }
 
+static inline unsigned btree_trans_iter_idx(struct btree_trans *trans,
+					    struct btree_iter *iter)
+{
+	ssize_t idx = iter - trans->iters;
+
+	BUG_ON(idx < 0 || idx >= trans->nr_iters);
+	BUG_ON(!(trans->iters_live & (1U << idx)));
+
+	return idx;
+}
+
+void bch2_trans_iter_put(struct btree_trans *trans,
+			 struct btree_iter *iter)
+{
+	ssize_t idx = btree_trans_iter_idx(trans, iter);
+
+	trans->iters_live	&= ~(1U << idx);
+}
+
 void bch2_trans_iter_free(struct btree_trans *trans,
 			  struct btree_iter *iter)
 {
-	unsigned idx;
-
-	for (idx = 0; idx < trans->nr_iters; idx++)
-		if (&trans->iters[idx] == iter)
-			goto found;
-	BUG();
-found:
-	BUG_ON(!(trans->iters_linked & (1U << idx)));
+	ssize_t idx = btree_trans_iter_idx(trans, iter);
 
 	trans->iters_live	&= ~(1U << idx);
 	trans->iters_linked	&= ~(1U << idx);
@@ -1719,10 +1730,6 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
 	} else {
 		iter = &trans->iters[idx];
 
-		BUG_ON(iter->btree_id != btree_id);
-		BUG_ON((iter->flags ^ flags) &
-		       (BTREE_ITER_SLOTS|BTREE_ITER_IS_EXTENTS));
-
 		iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
 		iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
 	}
@@ -1739,6 +1746,9 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
 
 	btree_trans_verify(trans);
 
+	BUG_ON(iter->btree_id != btree_id);
+	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
+
 	return iter;
 }
...
@@ -271,8 +271,8 @@ static inline int btree_iter_err(struct bkey_s_c k)
 /* new multiple iterator interface: */
 
 void bch2_trans_preload_iters(struct btree_trans *);
-void bch2_trans_iter_free(struct btree_trans *,
-			  struct btree_iter *);
+void bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
+void bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
 
 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
 					 struct bpos, unsigned, u64);
@@ -307,6 +307,11 @@ bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
 
 void __bch2_trans_begin(struct btree_trans *);
 
+static inline void bch2_trans_begin_updates(struct btree_trans *trans)
+{
+	trans->nr_updates = 0;
+}
+
 void *bch2_trans_kmalloc(struct btree_trans *, size_t);
 int bch2_trans_unlock(struct btree_trans *);
 void bch2_trans_init(struct btree_trans *, struct bch_fs *);
...
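
The put/free split declared in the header above gives callers two ways to release a transaction iterator; bch2_extent_update() below uses put for its inode iterator. A minimal sketch of the distinction, inferred from the implementation in this patch (illustrative only):

	/*
	 * bch2_trans_iter_put(): clears only the iters_live bit, so the
	 * slot stays linked and a later bch2_trans_get_iter() with the
	 * same btree_id/flags may hand it back out.
	 * bch2_trans_iter_free(): also clears iters_linked, returning
	 * the slot to the transaction entirely.
	 */
	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
				   POS(inum, 0),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	/* ... use iter, queue updates ... */

	bch2_trans_iter_put(&trans, iter);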
@@ -330,212 +330,189 @@ i_sectors_hook_init(struct bch_inode_info *inode, unsigned flags)
 
 /* normal i_size/i_sectors update machinery: */
 
-struct bchfs_extent_trans_hook {
-	struct bchfs_write_op		*op;
-	struct extent_insert_hook	hook;
-
-	struct bch_inode_unpacked	inode_u;
-	struct bkey_inode_buf		inode_p;
-
-	bool				need_inode_update;
-};
-
-static enum btree_insert_ret
-bchfs_extent_update_hook(struct extent_insert_hook *hook,
-			 struct bpos committed_pos,
-			 struct bpos next_pos,
-			 struct bkey_s_c k,
-			 const struct bkey_i *insert)
-{
-	struct bchfs_extent_trans_hook *h = container_of(hook,
-				struct bchfs_extent_trans_hook, hook);
-	struct bch_inode_info *inode = h->op->inode;
-	int sign = bkey_extent_is_allocation(&insert->k) -
-		(k.k && bkey_extent_is_allocation(k.k));
-	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
-	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
-	bool do_pack = false;
-
-	if (h->op->unalloc &&
-	    !bch2_extent_is_fully_allocated(k))
-		return BTREE_INSERT_ENOSPC;
-
-	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));
-
-	/* XXX: inode->i_size locking */
-	if (offset > inode->ei_inode.bi_size) {
-		if (!h->need_inode_update) {
-			h->need_inode_update = true;
-			return BTREE_INSERT_NEED_TRAVERSE;
-		}
-
-		/* truncate in progress? */
-		if (h->inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY)
-			goto no_i_size_update;
-
-		h->inode_u.bi_size = offset;
-		do_pack = true;
-
-		spin_lock(&inode->v.i_lock);
-		if (offset > inode->v.i_size) {
-			if (h->op->is_dio)
-				i_size_write(&inode->v, offset);
-			else
-				BUG();
-		}
-		spin_unlock(&inode->v.i_lock);
-	}
-no_i_size_update:
-	if (sectors) {
-		if (!h->need_inode_update) {
-			h->need_inode_update = true;
-			return BTREE_INSERT_NEED_TRAVERSE;
-		}
-
-		h->inode_u.bi_sectors += sectors;
-		do_pack = true;
-
-		h->op->sectors_added += sectors;
-	}
-
-	if (do_pack)
-		bch2_inode_pack(&h->inode_p, &h->inode_u);
-
-	return BTREE_INSERT_OK;
+static s64 sum_sector_overwrites(struct bkey_i *new, struct btree_iter *_iter,
+				 bool *allocating)
+{
+	struct btree_iter iter;
+	struct bkey_s_c old;
+	s64 delta = 0;
+
+	bch2_btree_iter_init(&iter, _iter->c, BTREE_ID_EXTENTS, POS_MIN,
+			     BTREE_ITER_SLOTS);
+	bch2_btree_iter_link(_iter, &iter);
+	bch2_btree_iter_copy(&iter, _iter);
+
+	for_each_btree_key_continue(&iter, BTREE_ITER_SLOTS, old) {
+		if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
+			break;
+
+		if (allocating &&
+		    !bch2_extent_is_fully_allocated(old))
+			*allocating = true;
+
+		delta += (min(new->k.p.offset,
+			      old.k->p.offset) -
+			  max(bkey_start_offset(&new->k),
+			      bkey_start_offset(old.k))) *
+			(bkey_extent_is_allocation(&new->k) -
+			 bkey_extent_is_allocation(old.k));
+	}
+
+	bch2_btree_iter_unlink(&iter);
+
+	return delta;
+}
+
+static int bch2_extent_update(struct btree_trans *trans,
+			      struct bch_inode_info *inode,
+			      struct disk_reservation *disk_res,
+			      struct quota_res *quota_res,
+			      struct btree_iter *extent_iter,
+			      struct bkey_i *k,
+			      u64 new_i_size,
+			      bool may_allocate,
+			      bool direct,
+			      s64 *total_delta)
+{
+	struct btree_iter *inode_iter = NULL;
+	struct bch_inode_unpacked inode_u;
+	struct bkey_inode_buf inode_p;
+	bool allocating = false;
+	bool extended = false;
+	s64 i_sectors_delta;
+	int ret;
+
+	bch2_trans_begin_updates(trans);
+
+	ret = bch2_btree_iter_traverse(extent_iter);
+	if (ret)
+		return ret;
+
+	bch2_extent_trim_atomic(k, extent_iter);
+
+	i_sectors_delta = sum_sector_overwrites(k, extent_iter, &allocating);
+	if (!may_allocate && allocating)
+		return -ENOSPC;
+
+	bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
+
+	new_i_size = min(k->k.p.offset << 9, new_i_size);
+
+	/* XXX: inode->i_size locking */
+	if (i_sectors_delta ||
+	    new_i_size > inode->ei_inode.bi_size) {
+		inode_iter = bch2_trans_get_iter(trans,
+			BTREE_ID_INODES,
+			POS(k->k.p.inode, 0),
+			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+		if (IS_ERR(inode_iter))
+			return PTR_ERR(inode_iter);
+
+		ret = bch2_btree_iter_traverse(inode_iter);
+		if (ret)
+			goto err;
+
+		inode_u = inode->ei_inode;
+		inode_u.bi_sectors += i_sectors_delta;
+
+		/* XXX: this is slightly suspect */
+		if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+		    new_i_size > inode_u.bi_size) {
+			inode_u.bi_size = new_i_size;
+			extended = true;
+		}
+
+		bch2_inode_pack(&inode_p, &inode_u);
+		bch2_trans_update(trans,
+			BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
+	}
+
+	ret = bch2_trans_commit(trans, disk_res, NULL,
+				&inode->ei_journal_seq,
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_ATOMIC|
+				BTREE_INSERT_NOUNLOCK|
+				BTREE_INSERT_USE_RESERVE);
+	if (ret)
+		goto err;
+
+	inode->ei_inode.bi_sectors += i_sectors_delta;
+
+	EBUG_ON(i_sectors_delta &&
+		inode->ei_inode.bi_sectors != inode_u.bi_sectors);
+
+	if (extended) {
+		inode->ei_inode.bi_size = new_i_size;
+
+		if (direct) {
+			spin_lock(&inode->v.i_lock);
+			if (new_i_size > inode->v.i_size)
+				i_size_write(&inode->v, new_i_size);
+			spin_unlock(&inode->v.i_lock);
+		}
+	}
+
+	if (direct)
+		i_sectors_acct(trans->c, inode, quota_res, i_sectors_delta);
+
+	if (total_delta)
+		*total_delta += i_sectors_delta;
+err:
+	if (!IS_ERR_OR_NULL(inode_iter))
+		bch2_trans_iter_put(trans, inode_iter);
+	return ret;
 }
 
 static int bchfs_write_index_update(struct bch_write_op *wop)
 {
 	struct bchfs_write_op *op = container_of(wop,
 				struct bchfs_write_op, op);
+	struct quota_res *quota_res = op->is_dio
+		? &container_of(op, struct dio_write, iop)->quota_res
+		: NULL;
+	struct bch_inode_info *inode = op->inode;
 	struct keylist *keys = &op->op.insert_keys;
-	struct btree_trans trans;
-	struct btree_iter *extent_iter, *inode_iter = NULL;
-	struct bchfs_extent_trans_hook hook;
 	struct bkey_i *k = bch2_keylist_front(keys);
-	s64 orig_sectors_added = op->sectors_added;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	int ret;
 
-	BUG_ON(k->k.p.inode != op->inode->v.i_ino);
+	BUG_ON(k->k.p.inode != inode->v.i_ino);
 
 	bch2_trans_init(&trans, wop->c);
 	bch2_trans_preload_iters(&trans);
 
-	extent_iter = bch2_trans_get_iter(&trans,
+	iter = bch2_trans_get_iter(&trans,
 				BTREE_ID_EXTENTS,
-				bkey_start_pos(&bch2_keylist_front(keys)->k),
+				bkey_start_pos(&k->k),
 				BTREE_ITER_INTENT);
-	BUG_ON(IS_ERR(extent_iter));
-
-	hook.op			= op;
-	hook.hook.fn		= bchfs_extent_update_hook;
-	hook.need_inode_update	= false;
 
 	do {
 		BKEY_PADDED(k) tmp;
 
-		ret = bch2_btree_iter_traverse(extent_iter);
-		if (ret)
-			goto err;
-
 		bkey_copy(&tmp.k, bch2_keylist_front(keys));
-		k = &tmp.k;
-
-		bch2_extent_trim_atomic(k, extent_iter);
-
-		/* XXX: inode->i_size locking */
-		if (min(k->k.p.offset << 9, op->new_i_size) >
-		    op->inode->ei_inode.bi_size)
-			hook.need_inode_update = true;
 
-		if (hook.need_inode_update) {
-			struct bkey_s_c inode;
-
-			if (!inode_iter) {
-				inode_iter = bch2_trans_get_iter(&trans,
-					BTREE_ID_INODES,
-					POS(extent_iter->pos.inode, 0),
-					BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-				BUG_ON(IS_ERR(inode_iter));
-			}
-
-			inode = bch2_btree_iter_peek_slot(inode_iter);
-			if ((ret = btree_iter_err(inode)))
-				goto err;
-
-			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
-				      "inode %llu not found when updating",
-				      extent_iter->pos.inode)) {
-				ret = -ENOENT;
-				break;
-			}
-
-			if (WARN_ONCE(bkey_bytes(inode.k) >
-				      sizeof(hook.inode_p),
-				      "inode %llu too big (%zu bytes, buf %zu)",
-				      extent_iter->pos.inode,
-				      bkey_bytes(inode.k),
-				      sizeof(hook.inode_p))) {
-				ret = -ENOENT;
-				break;
-			}
-
-			bkey_reassemble(&hook.inode_p.inode.k_i, inode);
-
-			ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
-						&hook.inode_u);
-			if (WARN_ONCE(ret,
-				      "error %i unpacking inode %llu",
-				      ret, extent_iter->pos.inode)) {
-				ret = -ENOENT;
-				break;
-			}
-
-			ret = bch2_btree_insert_at(wop->c, &wop->res,
-					&hook.hook, op_journal_seq(wop),
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_ATOMIC|
-					BTREE_INSERT_USE_RESERVE,
-					BTREE_INSERT_ENTRY(extent_iter, k),
-					BTREE_INSERT_ENTRY_EXTRA_RES(inode_iter,
-							&hook.inode_p.inode.k_i, 2));
-		} else {
-			ret = bch2_btree_insert_at(wop->c, &wop->res,
-					&hook.hook, op_journal_seq(wop),
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_ATOMIC|
-					BTREE_INSERT_NOUNLOCK|
-					BTREE_INSERT_USE_RESERVE,
-					BTREE_INSERT_ENTRY(extent_iter, k));
-		}
-
-		BUG_ON(bkey_cmp(extent_iter->pos, bkey_start_pos(&k->k)));
-
-		if (WARN_ONCE(!ret != !k->k.size,
-			      "ret %i k->size %u", ret, k->k.size))
-			ret = k->k.size ? -EINTR : 0;
-err:
+		ret = bch2_extent_update(&trans, inode,
+				&wop->res, quota_res,
+				iter, &tmp.k,
+				op->new_i_size,
+				!op->unalloc,
+				op->is_dio,
+				&op->sectors_added);
 		if (ret == -EINTR)
 			continue;
 		if (ret)
 			break;
 
-		if (hook.need_inode_update)
-			op->inode->ei_inode = hook.inode_u;
-
-		if (bkey_cmp(extent_iter->pos, bch2_keylist_front(keys)->k.p) < 0)
-			bch2_cut_front(extent_iter->pos, bch2_keylist_front(keys));
+		if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+			bch2_cut_front(iter->pos, bch2_keylist_front(keys));
 		else
 			bch2_keylist_pop_front(keys);
 	} while (!bch2_keylist_empty(keys));
 
 	bch2_trans_exit(&trans);
 
-	if (op->is_dio) {
-		struct dio_write *dio = container_of(op, struct dio_write, iop);
-
-		i_sectors_acct(wop->c, op->inode, &dio->quota_res,
-			       op->sectors_added - orig_sectors_added);
-	}
-
 	return ret;
 }
...