Commit b1bd955b authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Don't wait for ALLOC_SCAN_BATCH buckets in allocator

It used to be necessary for the allocator thread to batch up bucket
invalidations when possible - but since the btree key cache was added
that hasn't been a concern, and now the batching is causing the
allocator thread to livelock when the filesystem is nearly full.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 3a14d58e
@@ -1071,7 +1071,7 @@ static int bch2_allocator_thread(void *arg)
 
 		pr_debug("free_inc now empty");
 
-		do {
+		while (1) {
 			cond_resched();
 			/*
 			 * Find some buckets that we can invalidate, either
@@ -1095,22 +1095,21 @@ static int bch2_allocator_thread(void *arg)
 				wake_up_process(c->gc_thread);
 			}
 
+			if (nr)
+				break;
+
 			/*
 			 * If we found any buckets, we have to invalidate them
 			 * before we scan for more - but if we didn't find very
 			 * many we may want to wait on more buckets being
 			 * available so we don't spin:
 			 */
-			if (!nr ||
-			    (nr < ALLOC_SCAN_BATCH(ca) &&
-			     !fifo_empty(&ca->free[RESERVE_NONE]))) {
-				ret = wait_buckets_available(c, ca);
-				if (ret) {
-					up_read(&c->gc_lock);
-					goto stop;
-				}
+			ret = wait_buckets_available(c, ca);
+			if (ret) {
+				up_read(&c->gc_lock);
+				goto stop;
 			}
-		} while (!nr);
+		}
 
 		up_read(&c->gc_lock);
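For readers skimming the diff: below is a small, self-contained C sketch of the control-flow change only. The helpers (find_reclaimable_buckets(), wait_buckets_available(), the batch size, and the free-fifo check) are hypothetical stand-ins for the bcachefs functions named above, not their real implementations; the point is just the old wait condition versus the new one.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SCAN_BATCH	16	/* hypothetical stand-in for ALLOC_SCAN_BATCH(ca) */

static size_t scans;

/* Pretend the first scan finds one reclaimable bucket, later scans find none. */
static size_t find_reclaimable_buckets(void)
{
	return scans++ == 0 ? 1 : 0;
}

/* Stand-ins: the free fifo still has entries, and waiting always succeeds. */
static bool free_fifo_empty(void)		{ return false; }
static int  wait_buckets_available(void)	{ return 0; }

/*
 * Old logic: even after finding some buckets, call wait_buckets_available()
 * if fewer than a batch were found and the free fifo isn't empty - the wait
 * that can livelock a nearly full filesystem.
 */
static int scan_old(void)
{
	size_t nr;
	int ret;

	do {
		nr = find_reclaimable_buckets();

		if (!nr ||
		    (nr < SCAN_BATCH && !free_fifo_empty())) {
			ret = wait_buckets_available();
			if (ret)
				return ret;
		}
	} while (!nr);

	return 0;
}

/*
 * New logic: break out and invalidate whatever was found as soon as the scan
 * returns anything; only wait when nothing at all was found.
 */
static int scan_new(void)
{
	size_t nr;
	int ret;

	while (1) {
		nr = find_reclaimable_buckets();
		if (nr)
			break;

		ret = wait_buckets_available();
		if (ret)
			return ret;
	}

	return 0;
}

int main(void)
{
	int ret;

	scans = 0;
	ret = scan_old();
	printf("old loop: ret=%d after %zu scan(s)\n", ret, scans);

	scans = 0;
	ret = scan_new();
	printf("new loop: ret=%d after %zu scan(s)\n", ret, scans);
	return 0;
}

With the stubbed scan above, both versions return 0 after a single scan; the difference is that the old loop still calls wait_buckets_available() before returning even though it found a bucket, while the new loop does not - which is the behaviour the commit message describes as a livelock risk when the filesystem is nearly full.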