Commit 88767d65 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Update path now handles triggers that generate more triggers

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 44da9767
@@ -265,6 +265,7 @@ struct btree_insert_entry {
 	bool			deferred;
 	bool			triggered;
+	bool			marked;
 };

 #define BTREE_ITER_MAX	64
...
@@ -542,6 +542,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct bch_fs_usage_online *fs_usage = NULL;
 	struct btree_insert_entry *i;
+	bool saw_non_marked;
 	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
 		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
 		: 0;
@@ -551,14 +552,28 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 		BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);

 	trans_for_each_update_iter(trans, i)
-		if (update_has_triggers(trans, i) &&
-		    update_triggers_transactional(trans, i)) {
-			ret = bch2_trans_mark_update(trans, i);
-			if (ret == -EINTR)
-				trace_trans_restart_mark(trans->ip);
-			if (ret)
-				goto out_clear_replicas;
+		i->marked = false;
+
+	do {
+		saw_non_marked = false;
+
+		trans_for_each_update_iter(trans, i) {
+			if (i->marked)
+				continue;
+
+			saw_non_marked = true;
+			i->marked = true;
+
+			if (update_has_triggers(trans, i) &&
+			    update_triggers_transactional(trans, i)) {
+				ret = bch2_trans_mark_update(trans, i->iter, i->k);
+				if (ret == -EINTR)
+					trace_trans_restart_mark(trans->ip);
+				if (ret)
+					goto out_clear_replicas;
+			}
 		}
+	} while (saw_non_marked);

 	btree_trans_lock_write(c, trans);
......
@@ -1590,9 +1590,9 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
 }

 int bch2_trans_mark_update(struct btree_trans *trans,
-			   struct btree_insert_entry *insert)
+			   struct btree_iter *iter,
+			   struct bkey_i *insert)
 {
-	struct btree_iter	*iter = insert->iter;
 	struct btree		*b = iter->l[0].b;
 	struct btree_node_iter	node_iter = iter->l[0].iter;
 	struct bkey_packed	*_k;
@@ -1602,9 +1602,9 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 		return 0;

 	ret = bch2_trans_mark_key(trans,
-			bkey_i_to_s_c(insert->k),
-			bpos_min(insert->k->k.p, b->key.k.p).offset -
-			bkey_start_offset(&insert->k->k),
+			bkey_i_to_s_c(insert),
+			bpos_min(insert->k.p, b->key.k.p).offset -
+			bkey_start_offset(&insert->k),
 			BCH_BUCKET_MARK_INSERT);
 	if (ret)
 		return ret;
@@ -1618,25 +1618,25 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 		k = bkey_disassemble(b, _k, &unpacked);

 		if (btree_node_is_extents(b)
-		    ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
-		    : bkey_cmp(insert->k->k.p, k.k->p))
+		    ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
+		    : bkey_cmp(insert->k.p, k.k->p))
 			break;

 		if (btree_node_is_extents(b)) {
-			switch (bch2_extent_overlap(&insert->k->k, k.k)) {
+			switch (bch2_extent_overlap(&insert->k, k.k)) {
 			case BCH_EXTENT_OVERLAP_ALL:
 				sectors = -((s64) k.k->size);
 				break;
 			case BCH_EXTENT_OVERLAP_BACK:
-				sectors = bkey_start_offset(&insert->k->k) -
+				sectors = bkey_start_offset(&insert->k) -
 					k.k->p.offset;
 				break;
 			case BCH_EXTENT_OVERLAP_FRONT:
 				sectors = bkey_start_offset(k.k) -
-					insert->k->k.p.offset;
+					insert->k.p.offset;
 				break;
 			case BCH_EXTENT_OVERLAP_MIDDLE:
-				sectors = k.k->p.offset - insert->k->k.p.offset;
+				sectors = k.k->p.offset - insert->k.p.offset;
 				BUG_ON(sectors <= 0);

 				ret = bch2_trans_mark_key(trans, k, sectors,
@@ -1644,7 +1644,7 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 				if (ret)
 					return ret;

-				sectors = bkey_start_offset(&insert->k->k) -
+				sectors = bkey_start_offset(&insert->k) -
 					k.k->p.offset;
 				break;
 			}
...
@@ -274,7 +274,8 @@ void bch2_replicas_delta_list_apply(struct bch_fs *,
 			struct replicas_delta_list *);
 int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
 int bch2_trans_mark_update(struct btree_trans *,
-			   struct btree_insert_entry *);
+			   struct btree_iter *iter,
+			   struct bkey_i *insert);
 void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);

 /* disk reservations: */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment