Commit f82755e4 authored by Kent Overstreet

bcachefs: Data move path now uses bch2_trans_unlock_long()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c4accde4
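
For context, a minimal sketch of what bch2_trans_unlock_long() is assumed to do beyond plain bch2_trans_unlock(); the exact body is an assumption, and only helpers named in this diff are used:

/*
 * Sketch, not the verbatim tree code: bch2_trans_unlock() drops btree
 * node locks but leaves the transaction's SRCU read lock held, which
 * delays memory reclaim (hence the new check_srcu_held_too_long()
 * warning in this diff); bch2_trans_unlock_long() is assumed to also
 * release SRCU, making it the right call before long sleeps in the
 * data move path.
 */
void bch2_trans_unlock_long(struct btree_trans *trans)
{
        bch2_trans_unlock(trans);       /* drop btree node locks */
        bch2_trans_srcu_unlock(trans);  /* also drop SRCU so reclaim can proceed */
}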
@@ -2833,6 +2833,13 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 	return p;
 }
 
+static inline void check_srcu_held_too_long(struct btree_trans *trans)
+{
+	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
+	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
+	     (jiffies - trans->srcu_lock_time) / HZ);
+}
+
 void bch2_trans_srcu_unlock(struct btree_trans *trans)
 {
 	if (trans->srcu_held) {
@@ -2843,6 +2850,7 @@ void bch2_trans_srcu_unlock(struct btree_trans *trans)
 			if (path->cached && !btree_node_locked(path, 0))
 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
 
+		check_srcu_held_too_long(trans);
 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
 		trans->srcu_held = false;
 	}
@@ -3074,8 +3082,10 @@ void bch2_trans_put(struct btree_trans *trans)
 	check_btree_paths_leaked(trans);
 
-	if (trans->srcu_held)
+	if (trans->srcu_held) {
+		check_srcu_held_too_long(trans);
 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+	}
 
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
......
@@ -147,9 +147,8 @@ void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
 {
 	struct moving_io *io;
 
-	bch2_trans_unlock(ctxt->trans);
-
 	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+		bch2_trans_unlock_long(ctxt->trans);
 		list_del(&io->read_list);
 		move_write(io);
 	}
@@ -485,8 +484,8 @@ int bch2_move_ratelimit(struct moving_context *ctxt)
 	struct bch_fs *c = ctxt->trans->c;
 	u64 delay;
 
-	if (ctxt->wait_on_copygc) {
-		bch2_trans_unlock(ctxt->trans);
+	if (ctxt->wait_on_copygc && !c->copygc_running) {
+		bch2_trans_unlock_long(ctxt->trans);
 		wait_event_killable(c->copygc_running_wq,
 				    !c->copygc_running ||
 				    kthread_should_stop());
@@ -495,8 +494,12 @@ int bch2_move_ratelimit(struct moving_context *ctxt)
 	do {
 		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
 		if (delay) {
-			bch2_trans_unlock(ctxt->trans);
+			if (delay > HZ / 10)
+				bch2_trans_unlock_long(ctxt->trans);
+			else
+				bch2_trans_unlock(ctxt->trans);
 			set_current_state(TASK_INTERRUPTIBLE);
 		}
......
@@ -45,6 +45,7 @@ do {								\
 								\
 	if (_cond)						\
 		break;						\
+	bch2_trans_unlock_long((_ctxt)->trans);			\
 	__wait_event((_ctxt)->wait,				\
 		     bch2_moving_ctxt_next_pending_write(_ctxt) ||	\
 		     (cond_finished = (_cond)));		\
......
@@ -128,7 +128,7 @@ static void move_buckets_wait(struct moving_context *ctxt,
 		kfree(i);
 	}
 
-	bch2_trans_unlock(ctxt->trans);
+	bch2_trans_unlock_long(ctxt->trans);
 }
 
 static bool bucket_in_flight(struct buckets_in_flight *list,
@@ -327,7 +327,7 @@ static int bch2_copygc_thread(void *arg)
 	while (!ret && !kthread_should_stop()) {
 		bool did_work = false;
 
-		bch2_trans_unlock(ctxt.trans);
+		bch2_trans_unlock_long(ctxt.trans);
 		cond_resched();
 
 		if (!c->copy_gc_enabled) {
......
@@ -348,7 +348,7 @@ static int do_rebalance(struct moving_context *ctxt)
 	       !kthread_should_stop() &&
 	       !atomic64_read(&r->work_stats.sectors_seen) &&
 	       !atomic64_read(&r->scan_stats.sectors_seen)) {
-		bch2_trans_unlock(trans);
+		bch2_trans_unlock_long(trans);
 		rebalance_wait(c);
 	}
......