Commit 2a9101a9 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Refactor bch2_trans_commit() path

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8f196539
@@ -301,7 +301,6 @@ do {								\
 	x(btree_node_sort)					\
 	x(btree_node_read)					\
 	x(btree_gc)						\
-	x(btree_update)						\
 	x(btree_lock_contended_read)				\
 	x(btree_lock_contended_intent)				\
 	x(btree_lock_contended_write)				\
......
@@ -62,10 +62,10 @@ bool __bch2_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode
 static inline unsigned should_compact_bset_lazy(struct btree *b, struct bset_tree *t)
 {
-	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
-	unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
+	unsigned total_u64s = bset_u64s(t);
+	unsigned dead_u64s = total_u64s - b->nr.bset_u64s[t - b->set];
 
-	return dead_u64s > 128 && dead_u64s * 3 > bset_u64s;
+	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
 }
 
 static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
......
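Note: the rewritten heuristic reads as: a bset is worth compacting once it carries more than 64 dead u64s and the dead entries make up more than a third of the total. A minimal standalone sketch of that check, outside the kernel (names and demo values are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Same threshold logic as should_compact_bset_lazy() above, stripped of
 * the btree types: compact when > 64 dead u64s and dead > total / 3. */
static bool should_compact(unsigned total_u64s, unsigned live_u64s)
{
	unsigned dead_u64s = total_u64s - live_u64s;

	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
}

int main(void)
{
	printf("%d\n", should_compact(300, 180));	/* 120 dead: compact */
	printf("%d\n", should_compact(300, 250));	/* 50 dead: keep */
	return 0;
}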
@@ -48,6 +48,11 @@ static inline int btree_iter_err(const struct btree_iter *iter)
 
 /* Iterate over iters within a transaction: */
 
+#define trans_for_each_iter_all(_trans, _iter)			\
+	for (_iter = (_trans)->iters;				\
+	     _iter < (_trans)->iters + (_trans)->nr_iters;	\
+	     _iter++)
+
 static inline struct btree_iter *
 __trans_next_iter(struct btree_trans *trans, unsigned idx)
 {
......
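Note: unlike trans_for_each_iter(), which walks only the iterators linked into the transaction, the new trans_for_each_iter_all() visits every allocated slot; the commit path below uses it to drop read locks and clear flags on all iterators. A userspace sketch of the same macro shape, with hypothetical stand-in types:

#include <stdio.h>

struct iter { int idx; };

struct trans {
	struct iter iters[8];
	unsigned    nr_iters;
};

/* Walk every slot up to nr_iters, live or not: */
#define trans_for_each_iter_all(_trans, _iter)			\
	for (_iter = (_trans)->iters;				\
	     _iter < (_trans)->iters + (_trans)->nr_iters;	\
	     _iter++)

int main(void)
{
	struct trans t = { .nr_iters = 3 };
	struct iter *it;
	int i = 0;

	trans_for_each_iter_all(&t, it)
		it->idx = i++;

	trans_for_each_iter_all(&t, it)
		printf("iter %d\n", it->idx);
	return 0;
}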
@@ -255,7 +255,6 @@ struct btree_insert_entry {
 struct btree_trans {
 	struct bch_fs		*c;
 	unsigned long		ip;
-	u64			commit_start;
 
 	u64			iters_linked;
 	u64			iters_live;
@@ -283,12 +282,11 @@ struct btree_trans {
 	struct disk_reservation *disk_res;
 	unsigned		flags;
 	unsigned		journal_u64s;
+	struct replicas_delta_list *fs_usage_deltas;
 
 	struct btree_iter	iters_onstack[2];
 	struct btree_insert_entry updates_onstack[6];
 	u8			updates_sorted_onstack[6];
-
-	struct replicas_delta_list *fs_usage_deltas;
 };
 
 #define BTREE_FLAG(flag)						\
@@ -420,6 +418,12 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
 	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
 })
 
+static inline unsigned bset_u64s(struct bset_tree *t)
+{
+	return t->end_offset - t->data_offset -
+		sizeof(struct bset) / sizeof(u64);
+}
+
 static inline unsigned bset_byte_offset(struct btree *b, void *i)
 {
 	return i - (void *) b->data;
......
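Note: the new bset_u64s() helper derives a bset's key-space size from the bset_tree offsets, which are kept in units of u64s from the start of the node, subtracting out the u64s occupied by the bset header itself. A simplified sketch of the arithmetic with stand-in types (the real bcachefs struct layouts differ):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins -- not the real bcachefs structs: */
struct bset { uint64_t seq; uint16_t u64s; };	/* padded to 16 bytes = 2 u64s */

struct bset_tree { uint16_t data_offset, end_offset; };	/* in u64s */

static unsigned bset_u64s(const struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(uint64_t);
}

int main(void)
{
	struct bset_tree t = { .data_offset = 8, .end_offset = 40 };

	/* 32 u64s spanned, minus the 2-u64 header: prints 30 */
	printf("%u\n", bset_u64s(&t));
	return 0;
}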
@@ -93,9 +93,30 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
 int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
 			       struct btree *, struct bkey_i_btree_ptr *);
 
-int bch2_trans_commit(struct btree_trans *,
-		      struct disk_reservation *,
-		      u64 *, unsigned);
+int __bch2_trans_commit(struct btree_trans *);
+
+/**
+ * bch2_trans_commit - insert keys at given iterator positions
+ *
+ * This is main entry point for btree updates.
+ *
+ * Return values:
+ * -EINTR: locking changed, this function should be called again. Only returned
+ *  if passed BTREE_INSERT_ATOMIC.
+ * -EROFS: filesystem read only
+ * -EIO: journal or btree node IO error
+ */
+static inline int bch2_trans_commit(struct btree_trans *trans,
+				    struct disk_reservation *disk_res,
+				    u64 *journal_seq,
+				    unsigned flags)
+{
+	trans->disk_res		= disk_res;
+	trans->journal_seq	= journal_seq;
+	trans->flags		= flags;
+	return __bch2_trans_commit(trans);
+}
 
 static inline void bch2_trans_update(struct btree_trans *trans,
 				     struct btree_iter *iter,
......
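Note: this hunk is the core of the refactor: disk_res, journal_seq and flags move into struct btree_trans, and the old four-argument entry point becomes a trivial inline wrapper around __bch2_trans_commit(). A standalone sketch of this stash-in-context pattern (all names here are illustrative, not bcachefs API):

#include <stdio.h>

struct trans {
	unsigned long long *journal_seq;
	unsigned	    flags;
};

/* out-of-line worker: reads everything from the context struct */
static int __trans_commit(struct trans *trans)
{
	if (trans->journal_seq)
		*trans->journal_seq += 1;	/* pretend we committed */
	return 0;
}

/* inline wrapper: stash the arguments, then call the worker */
static inline int trans_commit(struct trans *trans,
			       unsigned long long *journal_seq,
			       unsigned flags)
{
	trans->journal_seq = journal_seq;
	trans->flags	   = flags;
	return __trans_commit(trans);
}

int main(void)
{
	struct trans t = { 0 };
	unsigned long long seq = 0;

	trans_commit(&t, &seq, 0);
	printf("seq %llu\n", seq);
	return 0;
}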
@@ -20,16 +20,11 @@
 #include <linux/sort.h>
 
 static inline bool same_leaf_as_prev(struct btree_trans *trans,
-				     unsigned sorted_idx)
+				     unsigned idx)
 {
-	struct btree_insert_entry *i = trans->updates +
-		trans->updates_sorted[sorted_idx];
-	struct btree_insert_entry *prev = sorted_idx
-		? trans->updates + trans->updates_sorted[sorted_idx - 1]
-		: NULL;
-
-	return prev &&
-		i->iter->l[0].b == prev->iter->l[0].b;
+	return idx &&
+		trans->updates[trans->updates_sorted[idx]].iter->l[0].b ==
+		trans->updates[trans->updates_sorted[idx - 1]].iter->l[0].b;
 }
 
 #define trans_for_each_update_sorted(_trans, _i, _iter)			\
@@ -92,8 +87,6 @@ static inline void btree_trans_sort_updates(struct btree_trans *trans)
 		trans->updates_sorted[pos] = l - trans->updates;
 		nr++;
 	}
-
-	BUG_ON(nr != trans->nr_updates);
 }
 
 /* Inserting into a given leaf node (last stage of insert): */
@@ -266,8 +259,8 @@ static void bch2_insert_fixup_key(struct btree_trans *trans,
 	EBUG_ON(insert->k->k.u64s >
 		bch_btree_keys_u64s_remaining(trans->c, l->b));
 
-	if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
-				       insert->k))
+	if (likely(bch2_btree_bset_insert_key(iter, l->b, &l->iter,
+					      insert->k)))
 		bch2_btree_journal_key(trans, iter, insert->k);
 }
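Note: this hunk also adds a likely() annotation on the successful-insert path; in the kernel these expand to __builtin_expect() branch hints. The usual userspace equivalent:

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* toy stand-in: the compiler lays out the likely branch as fall-through */
static int insert_key(int fits)
{
	if (likely(fits))
		return 0;	/* hot path */
	return -1;		/* rare failure */
}

int main(void)
{
	printf("%d\n", insert_key(1));
	return 0;
}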
@@ -280,7 +273,8 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = insert->iter;
 	struct btree *b = iter->l[0].b;
-	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
+	struct bset_tree *t = bset_tree_last(b);
+	int old_u64s = bset_u64s(t);
 	int old_live_u64s = b->nr.live_u64s;
 	int live_u64s_added, u64s_added;
@@ -290,7 +284,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 		bch2_insert_fixup_extent(trans, insert);
 
 	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
-	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
+	u64s_added = (int) bset_u64s(t) - old_u64s;
 
 	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
 		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
@@ -323,26 +317,12 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
 		bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->iter->btree_id));
 }
 
-static int bch2_trans_journal_preres_get(struct btree_trans *trans)
+static noinline int
+bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
-	unsigned u64s = 0;
 	int ret;
 
-	trans_for_each_update(trans, i)
-		if (0)
-			u64s += jset_u64s(i->k->k.u64s);
-
-	if (!u64s)
-		return 0;
-
-	ret = bch2_journal_preres_get(&c->journal,
-			&trans->journal_preres, u64s,
-			JOURNAL_RES_GET_NONBLOCK);
-	if (ret != -EAGAIN)
-		return ret;
-
 	bch2_trans_unlock(trans);
 
 	ret = bch2_journal_preres_get(&c->journal,
@@ -358,8 +338,8 @@ static int bch2_trans_journal_preres_get(struct btree_trans *trans)
 	return 0;
 }
 
-static int bch2_trans_journal_res_get(struct btree_trans *trans,
-				      unsigned flags)
+static inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+					     unsigned flags)
 {
 	struct bch_fs *c = trans->c;
 	int ret;
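Note: the old bch2_trans_journal_preres_get() mixed the nonblocking attempt and the blocking fallback; after the split, the nonblocking call sits inline in do_bch2_trans_commit() below and only the cold path, which has to unlock, block, and relock, lives in a noinline helper. A standalone sketch of that hot/cold shape (reserve_nonblock()/reserve_cold() are made-up stand-ins):

#include <errno.h>
#include <stdio.h>

static int reserve_nonblock(unsigned u64s)
{
	return u64s > 16 ? -EAGAIN : 0;	/* pretend big requests must wait */
}

/* cold path kept out of line so the fast path stays small */
static __attribute__((noinline)) int reserve_cold(unsigned u64s)
{
	(void) u64s;	/* would unlock, block until space frees up, relock */
	return 0;
}

static inline int reserve(unsigned u64s)
{
	int ret = reserve_nonblock(u64s);

	if (ret == -EAGAIN)
		ret = reserve_cold(u64s);
	return ret;
}

int main(void)
{
	printf("%d\n", reserve(32));
	return 0;
}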
@@ -438,63 +418,43 @@ static inline bool update_has_nontrans_triggers(struct btree_insert_entry *i)
 		(1U << i->iter->btree_id);
 }
 
-/*
- * Get journal reservation, take write locks, and attempt to do btree update(s):
- */
-static inline int do_btree_insert_at(struct btree_trans *trans,
-				     struct btree_insert_entry **stopped_at)
+static noinline void bch2_btree_iter_unlock_noinline(struct btree_iter *iter)
+{
+	__bch2_btree_iter_unlock(iter);
+}
+
+static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_fs_usage_online *fs_usage = NULL;
 	struct btree_insert_entry *i;
-	struct btree_iter *iter;
 	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
 		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
 		: 0;
-	int ret;
 
-	trans_for_each_update(trans, i)
-		BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
+	if (unlikely(trans->flags & BTREE_INSERT_NOMARK))
+		return;
 
-	/*
-	 * note: running triggers will append more updates to the list of
-	 * updates as we're walking it:
-	 */
-	trans_for_each_update(trans, i)
-		if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
-		    update_has_trans_triggers(i)) {
-			ret = bch2_trans_mark_update(trans, i->iter, i->k);
-			if (ret == -EINTR)
-				trace_trans_restart_mark(trans->ip);
-			if (ret)
-				goto out_clear_replicas;
-		}
-
-	trans_for_each_iter(trans, iter) {
-		if (iter->nodes_locked != iter->nodes_intent_locked) {
-			BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
-			BUG_ON(trans->iters_live & (1ULL << iter->idx));
-			__bch2_btree_iter_unlock(iter);
-		}
-	}
-
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
-		trans_for_each_update(trans, i)
-			btree_insert_entry_checks(trans, i);
-	bch2_btree_trans_verify_locks(trans);
-
-	/*
-	 * No more updates can be added - sort updates so we can take write
-	 * locks in the correct order:
-	 */
-	btree_trans_sort_updates(trans);
-
-	btree_trans_lock_write(trans, true);
+	trans_for_each_update(trans, i)
+		if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
+			bch2_mark_update(trans, i, NULL,
+					 mark_flags|BCH_BUCKET_MARK_GC);
+}
+
+static inline int
+bch2_trans_commit_write_locked(struct btree_trans *trans,
+			       struct btree_insert_entry **stopped_at)
+{
+	struct bch_fs *c = trans->c;
+	struct bch_fs_usage_online *fs_usage = NULL;
+	struct btree_insert_entry *i;
+	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
+		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
+		: 0;
+	int ret;
 
 	if (race_fault()) {
-		ret = -EINTR;
 		trace_trans_restart_fault_inject(trans->ip);
-		goto out;
+		return -EINTR;
 	}
 
 	/*
@@ -504,7 +464,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	 */
 	ret = btree_trans_check_can_insert(trans, stopped_at);
 	if (ret)
-		goto out;
+		return ret;
 
 	trans_for_each_update(trans, i) {
 		if (!btree_node_type_needs_gc(i->iter->btree_id))
@@ -515,10 +475,11 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 			fs_usage = bch2_fs_usage_scratch_get(c);
 		}
 
+		/* Must be called under mark_lock: */
 		if (!bch2_bkey_replicas_marked_locked(c,
 			bkey_i_to_s_c(i->k), true)) {
 			ret = BTREE_INSERT_NEED_MARK_REPLICAS;
-			goto out;
+			goto err;
 		}
 	}
@@ -527,16 +488,17 @@
 	 * succeed:
 	 */
 	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
-		trans->journal_u64s = 0;
-
-		trans_for_each_update(trans, i)
-			trans->journal_u64s += jset_u64s(i->k->k.u64s);
-
-		ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_NONBLOCK);
+		ret = bch2_trans_journal_res_get(trans,
+						 JOURNAL_RES_GET_NONBLOCK);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
+	/*
+	 * Not allowed to fail after we've gotten our journal reservation - we
+	 * have to use it:
+	 */
 	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
 		if (journal_seq_verify(c))
 			trans_for_each_update(trans, i)
@@ -558,39 +520,122 @@
 	if (fs_usage)
 		bch2_trans_fs_usage_apply(trans, fs_usage);
 
-	if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
-	    unlikely(c->gc_pos.phase))
-		trans_for_each_update(trans, i)
-			if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
-				bch2_mark_update(trans, i, NULL,
-						 mark_flags|
-						 BCH_BUCKET_MARK_GC);
+	if (unlikely(c->gc_pos.phase))
+		bch2_trans_mark_gc(trans);
 
 	trans_for_each_update(trans, i)
 		do_btree_insert_one(trans, i);
-out:
-	BUG_ON(ret &&
-	       (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
-	       trans->journal_res.ref);
-
-	btree_trans_lock_write(trans, false);
-
+err:
 	if (fs_usage) {
 		bch2_fs_usage_scratch_put(c, fs_usage);
 		percpu_up_read(&c->mark_lock);
 	}
 
-	bch2_journal_res_put(&c->journal, &trans->journal_res);
-out_clear_replicas:
-	if (trans->fs_usage_deltas) {
-		trans->fs_usage_deltas->used = 0;
-		memset((void *) trans->fs_usage_deltas +
-		       offsetof(struct replicas_delta_list, memset_start), 0,
-		       (void *) &trans->fs_usage_deltas->memset_end -
-		       (void *) &trans->fs_usage_deltas->memset_start);
-	}
-
-	return ret;
+	return ret;
+}
+
+/*
+ * Get journal reservation, take write locks, and attempt to do btree update(s):
+ */
+static inline int do_bch2_trans_commit(struct btree_trans *trans,
+				       struct btree_insert_entry **stopped_at)
+{
+	struct btree_insert_entry *i;
+	struct btree_iter *iter;
+	unsigned idx, u64s, journal_preres_u64s = 0;
+	int ret;
+
+	/*
+	 * note: running triggers will append more updates to the list of
+	 * updates as we're walking it:
+	 */
+	trans_for_each_update(trans, i) {
+		/* we know trans->nounlock won't be set here: */
+		if (unlikely(!(i->iter->locks_want < 1
+			       ? __bch2_btree_iter_upgrade(i->iter, 1)
+			       : i->iter->uptodate <= BTREE_ITER_NEED_PEEK))) {
+			trace_trans_restart_upgrade(trans->ip);
+			return -EINTR;
+		}
+
+		if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
+		    update_has_trans_triggers(i)) {
+			ret = bch2_trans_mark_update(trans, i->iter, i->k);
+			if (unlikely(ret)) {
+				if (ret == -EINTR)
+					trace_trans_restart_mark(trans->ip);
+				return ret;
+			}
+		}
+
+		u64s = jset_u64s(i->k->k.u64s);
+		if (0)
+			journal_preres_u64s += u64s;
+		trans->journal_u64s += u64s;
+	}
+
+	ret = bch2_journal_preres_get(&trans->c->journal,
+			&trans->journal_preres, journal_preres_u64s,
+			JOURNAL_RES_GET_NONBLOCK);
+	if (unlikely(ret == -EAGAIN))
+		ret = bch2_trans_journal_preres_get_cold(trans,
+						journal_preres_u64s);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Can't be holding any read locks when we go to take write locks:
+	 *
+	 * note - this must be done after bch2_trans_journal_preres_get_cold()
+	 * or anything else that might call bch2_trans_relock(), since that
+	 * would just retake the read locks:
+	 */
+	trans_for_each_iter_all(trans, iter) {
+		if (iter->nodes_locked != iter->nodes_intent_locked) {
+			EBUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+			EBUG_ON(trans->iters_live & (1ULL << iter->idx));
+			bch2_btree_iter_unlock_noinline(iter);
+		}
+	}
+
+	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
+		trans_for_each_update(trans, i)
+			btree_insert_entry_checks(trans, i);
+	bch2_btree_trans_verify_locks(trans);
+
+	/*
+	 * No more updates can be added - sort updates so we can take write
+	 * locks in the correct order:
+	 */
+	btree_trans_sort_updates(trans);
+
+	btree_trans_lock_write(trans, true);
+	ret = bch2_trans_commit_write_locked(trans, stopped_at);
+	btree_trans_lock_write(trans, false);
+
+	/*
+	 * Drop journal reservation after dropping write locks, since dropping
+	 * the journal reservation may kick off a journal write:
+	 */
+	bch2_journal_res_put(&trans->c->journal, &trans->journal_res);
+
+	if (unlikely(ret))
+		return ret;
+
+	if (trans->flags & BTREE_INSERT_NOUNLOCK)
+		trans->nounlock = true;
+
+	trans_for_each_update_sorted(trans, i, idx)
+		if (!same_leaf_as_prev(trans, idx))
+			bch2_foreground_maybe_merge(trans->c, i->iter,
+						    0, trans->flags);
+
+	trans->nounlock = false;
+
+	trans_for_each_update(trans, i)
+		bch2_btree_iter_downgrade(i->iter);
+
+	return 0;
 }
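Note: do_bch2_trans_commit() sorts the updates before btree_trans_lock_write() for the classic reason spelled out in its comment: taking write locks in one global order rules out lock-order inversions between racing transactions. A toy illustration of sort-then-lock (the "order" here is just a node id):

#include <stdio.h>
#include <stdlib.h>

static int cmp_node(const void *a, const void *b)
{
	return (*(const int *)a > *(const int *)b) -
	       (*(const int *)a < *(const int *)b);
}

int main(void)
{
	int nodes[] = { 42, 7, 19 };

	/* sort first, then acquire in ascending order */
	qsort(nodes, 3, sizeof(nodes[0]), cmp_node);
	for (int i = 0; i < 3; i++)
		printf("lock node %d\n", nodes[i]);
	return 0;
}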
 
 static noinline
@@ -698,66 +743,27 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	return ret;
 }
 
-/**
- * __bch_btree_insert_at - insert keys at given iterator positions
- *
- * This is main entry point for btree updates.
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- *  if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-static int __bch2_trans_commit(struct btree_trans *trans,
-			       struct btree_insert_entry **stopped_at)
+static noinline int
+bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
-	unsigned iter;
 	int ret;
 
-	trans_for_each_update(trans, i) {
-		if (!bch2_btree_iter_upgrade(i->iter, 1)) {
-			trace_trans_restart_upgrade(trans->ip);
-			ret = -EINTR;
-			goto err;
-		}
-
-		ret = btree_iter_err(i->iter);
-		if (ret)
-			goto err;
-	}
+	if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
+		return -EROFS;
 
-	ret = do_btree_insert_at(trans, stopped_at);
-	if (unlikely(ret))
-		goto err;
-
-	if (trans->flags & BTREE_INSERT_NOUNLOCK)
-		trans->nounlock = true;
-
-	trans_for_each_update_sorted(trans, i, iter)
-		if (!same_leaf_as_prev(trans, iter))
-			bch2_foreground_maybe_merge(c, i->iter,
-						    0, trans->flags);
-
-	trans->nounlock = false;
-
-	trans_for_each_update(trans, i)
-		bch2_btree_iter_downgrade(i->iter);
-err:
-	/* make sure we didn't drop or screw up locks: */
-	bch2_btree_trans_verify_locks(trans);
+	bch2_trans_unlock(trans);
 
-	return ret;
+	ret = bch2_fs_read_write_early(c);
+	if (ret)
+		return ret;
+
+	percpu_ref_get(&c->writes);
+	return 0;
 }
-int bch2_trans_commit(struct btree_trans *trans,
-		      struct disk_reservation *disk_res,
-		      u64 *journal_seq,
-		      unsigned flags)
+int __bch2_trans_commit(struct btree_trans *trans)
 {
-	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i = NULL;
 	struct btree_iter *iter;
 	unsigned orig_nr_updates = trans->nr_updates;
@@ -768,61 +774,47 @@ int bch2_trans_commit(struct btree_trans *trans,
 		goto out_noupdates;
 
 	/* for the sake of sanity: */
-	BUG_ON(trans->nr_updates > 1 && !(flags & BTREE_INSERT_ATOMIC));
-
-	if (flags & BTREE_INSERT_GC_LOCK_HELD)
-		lockdep_assert_held(&c->gc_lock);
+	EBUG_ON(trans->nr_updates > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
 
-	if (!trans->commit_start)
-		trans->commit_start = local_clock();
+	if (trans->flags & BTREE_INSERT_GC_LOCK_HELD)
+		lockdep_assert_held(&trans->c->gc_lock);
 
-	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
 	memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
-	trans->disk_res		= disk_res;
-	trans->journal_seq	= journal_seq;
-	trans->flags		= flags;
 
-	if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
-		     !percpu_ref_tryget(&c->writes))) {
-		if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
-			return -EROFS;
-
-		bch2_trans_unlock(trans);
-
-		ret = bch2_fs_read_write_early(c);
+	if (!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
+	    unlikely(!percpu_ref_tryget(&trans->c->writes))) {
+		ret = bch2_trans_commit_get_rw_cold(trans);
 		if (ret)
 			return ret;
+	}
+retry:
+	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
+	trans->journal_u64s = 0;
 
-		percpu_ref_get(&c->writes);
+	ret = do_bch2_trans_commit(trans, &i);
 
-		if (!bch2_trans_relock(trans)) {
-			ret = -EINTR;
-			goto err;
-		}
+	if (trans->fs_usage_deltas) {
+		trans->fs_usage_deltas->used = 0;
+		memset((void *) trans->fs_usage_deltas +
+		       offsetof(struct replicas_delta_list, memset_start), 0,
+		       (void *) &trans->fs_usage_deltas->memset_end -
+		       (void *) &trans->fs_usage_deltas->memset_start);
 	}
-retry:
-	ret = bch2_trans_journal_preres_get(trans);
-	if (ret)
-		goto err;
 
-	ret = __bch2_trans_commit(trans, &i);
+	/* make sure we didn't drop or screw up locks: */
+	bch2_btree_trans_verify_locks(trans);
+
 	if (ret)
 		goto err;
 out:
-	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+	bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
 
-	if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
-		percpu_ref_put(&c->writes);
+	if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
+		percpu_ref_put(&trans->c->writes);
 out_noupdates:
-	if (!ret && trans->commit_start) {
-		bch2_time_stats_update(&c->times[BCH_TIME_btree_update],
-				       trans->commit_start);
-		trans->commit_start = 0;
-	}
+	EBUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
 
-	BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
-
-	trans_for_each_iter(trans, iter)
+	trans_for_each_iter_all(trans, iter)
 		iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
 
 	if (!ret) {
@@ -836,18 +828,16 @@ int bch2_trans_commit(struct btree_trans *trans,
 err:
 	ret = bch2_trans_commit_error(trans, i, ret);
 
-	/* free updates and memory used by triggers, they'll be reexecuted: */
-	trans->nr_updates = orig_nr_updates;
-	trans->mem_top = orig_mem_top;
-
 	/* can't loop if it was passed in and we changed it: */
 	if (unlikely(trans->flags & BTREE_INSERT_NO_CLEAR_REPLICAS) && !ret)
 		ret = -EINTR;
+	if (ret)
+		goto out;
 
-	if (!ret)
-		goto retry;
+	/* free updates and memory used by triggers, they'll be reexecuted: */
+	trans->nr_updates = orig_nr_updates;
+	trans->mem_top = orig_mem_top;
 
-	goto out;
+	goto retry;
 }
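Note: the fs_usage_deltas reset above relies on a memset_start/memset_end marker pair in struct replicas_delta_list, so a single memset wipes exactly the fields between the markers. A sketch of the idiom with made-up fields (zero-length arrays are a GNU C extension, as in the kernel):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct deltas {
	unsigned size;			/* preserved across resets */
	char	 memset_start[0];
	unsigned used;
	unsigned nr_entries;
	char	 memset_end[0];
};

static void deltas_reset(struct deltas *d)
{
	/* zero everything between the two markers, nothing else */
	memset((char *) d + offsetof(struct deltas, memset_start), 0,
	       offsetof(struct deltas, memset_end) -
	       offsetof(struct deltas, memset_start));
}

int main(void)
{
	struct deltas d = { .size = 8, .used = 3, .nr_entries = 2 };

	deltas_reset(&d);
	printf("size %u used %u nr %u\n", d.size, d.used, d.nr_entries);
	return 0;
}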
 
 /**
......
@@ -2720,20 +2720,26 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
 			     loff_t offset, loff_t len)
 {
 	struct bch_inode_info *inode = file_bch_inode(file);
+	struct bch_fs *c = inode->v.i_sb->s_fs_info;
+	long ret;
 
-	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
-		return bchfs_fallocate(inode, mode, offset, len);
-
-	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
-		return bchfs_fpunch(inode, offset, len);
-
-	if (mode == FALLOC_FL_INSERT_RANGE)
-		return bchfs_fcollapse_finsert(inode, offset, len, true);
-
-	if (mode == FALLOC_FL_COLLAPSE_RANGE)
-		return bchfs_fcollapse_finsert(inode, offset, len, false);
+	if (!percpu_ref_tryget(&c->writes))
+		return -EROFS;
 
-	return -EOPNOTSUPP;
+	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
+		ret = bchfs_fallocate(inode, mode, offset, len);
+	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
+		ret = bchfs_fpunch(inode, offset, len);
+	else if (mode == FALLOC_FL_INSERT_RANGE)
+		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
+	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
+	else
+		ret = -EOPNOTSUPP;
+
+	percpu_ref_put(&c->writes);
+
+	return ret;
 }
 
 static void mark_range_unallocated(struct bch_inode_info *inode,
......
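Note: both this hunk and the bch2_remap_range one below bracket the whole operation with percpu_ref_tryget(&c->writes) / percpu_ref_put(), so a write can't start once the filesystem has gone read-only. A much-simplified userspace sketch of that guard (a plain atomic counter plus a dead flag, nowhere near as cheap as a real percpu_ref):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct writes_ref {
	atomic_int  count;
	atomic_bool dead;	/* set when the fs goes read-only */
};

static bool ref_tryget(struct writes_ref *r)
{
	atomic_fetch_add(&r->count, 1);
	if (atomic_load(&r->dead)) {
		atomic_fetch_sub(&r->count, 1);
		return false;
	}
	return true;
}

static void ref_put(struct writes_ref *r)
{
	atomic_fetch_sub(&r->count, 1);
}

static int do_write_op(struct writes_ref *writes)
{
	if (!ref_tryget(writes))
		return -30;	/* EROFS */

	/* ... the actual operation runs here ... */

	ref_put(writes);
	return 0;
}

int main(void)
{
	struct writes_ref writes = { 0 };

	printf("%d\n", do_write_op(&writes));	/* 0 */
	atomic_store(&writes.dead, true);
	printf("%d\n", do_write_op(&writes));	/* -30 */
	return 0;
}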
@@ -310,6 +310,7 @@ int bch2_extent_update(struct btree_trans *trans,
 	bch2_trans_update(trans, iter, k);
 
 	ret = bch2_trans_commit(trans, disk_res, journal_seq,
+				BTREE_INSERT_NOCHECK_RW|
 				BTREE_INSERT_NOFAIL|
 				BTREE_INSERT_ATOMIC|
 				BTREE_INSERT_USE_RESERVE);
......
@@ -271,7 +271,7 @@ static inline void bch2_journal_res_put(struct journal *j,
 	if (!res->ref)
 		return;
 
-	lock_release(&j->res_map, _RET_IP_);
+	lock_release(&j->res_map, _THIS_IP_);
 
 	while (res->u64s)
 		bch2_journal_add_entry(j, res,
......
@@ -166,6 +166,9 @@ s64 bch2_remap_range(struct bch_fs *c,
 	u64 src_done, dst_done;
 	int ret = 0, ret2 = 0;
 
+	if (!percpu_ref_tryget(&c->writes))
+		return -EROFS;
+
 	if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
 		mutex_lock(&c->sb_lock);
 		if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
@@ -295,5 +298,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
 	ret = bch2_trans_exit(&trans) ?: ret;
 
+	percpu_ref_put(&c->writes);
+
 	return dst_done ?: ret ?: ret2;
 }