Commit 3a0e06db authored by Kent Overstreet

bcachefs: Assorted preemption fixes

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent d5f70c1f
@@ -291,8 +291,10 @@ int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
                 bch2_alloc_read_key(c, bkey_i_to_s_c(k));
         }
 
+        percpu_down_write(&c->mark_lock);
         for_each_member_device(ca, c, i)
                 bch2_dev_usage_from_buckets(c, ca);
+        percpu_up_write(&c->mark_lock);
 
         mutex_lock(&c->bucket_clock[READ].lock);
         for_each_member_device(ca, c, i) {
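For context, c->mark_lock is a percpu_rw_semaphore: the read side is cheap and taken per-cpu, while the write side drains all readers and then runs exclusively. Taking it for write around this loop is what lets bch2_dev_usage_from_buckets() drop its own read lock (see the hunk further down). A minimal sketch of that API, with illustrative names only (example_lock and the example_* functions are not bcachefs code):

    #include <linux/percpu-rwsem.h>

    static struct percpu_rw_semaphore example_lock;     /* illustrative */

    static int example_init(void)
    {
            /* a percpu_rw_semaphore must be initialized before use */
            return percpu_init_rwsem(&example_lock);
    }

    /* Read side: may be held concurrently on every CPU, very cheap to take. */
    static void example_read_path(void)
    {
            percpu_down_read(&example_lock);
            /* ... read or update state protected by the lock ... */
            percpu_up_read(&example_lock);
    }

    /* Write side: waits for all readers to drain, then runs exclusively. */
    static void example_write_path(void)
    {
            percpu_down_write(&example_lock);
            /* ... no reader is inside its critical section here ... */
            percpu_up_write(&example_lock);
    }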
@@ -354,8 +354,6 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
         if (c) {
                 lockdep_assert_held(&c->sb_lock);
                 percpu_down_read(&c->mark_lock);
-        } else {
-                preempt_disable();
         }
 
         for (i = 0; i < layout->nr_superblocks; i++) {
@@ -377,11 +375,8 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
                                           gc_phase(GC_PHASE_SB), flags);
         }
 
-        if (c) {
+        if (c)
                 percpu_up_read(&c->mark_lock);
-        } else {
-                preempt_enable();
-        }
 }
 
 static void bch2_mark_superblocks(struct bch_fs *c)
@@ -393,14 +393,19 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
         struct bucket_array *buckets;
         struct bucket *g;
 
-        percpu_down_read(&c->mark_lock);
+        /*
+         * This is only called during startup, before there's any multithreaded
+         * access to c->usage:
+         */
+        preempt_disable();
         fs_usage = this_cpu_ptr(c->usage[0]);
+        preempt_enable();
+
         buckets = bucket_array(ca);
 
         for_each_bucket(g, buckets)
                 if (g->mark.data_type)
                         bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
-        percpu_up_read(&c->mark_lock);
 }
 
 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)              \
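The new comment explains why the locking can go away, but a preempt_disable()/preempt_enable() pair is still needed around this_cpu_ptr(): with preemption enabled the task could migrate to another CPU, and with CONFIG_DEBUG_PREEMPT the kernel warns about using smp_processor_id() in a preemptible section. Re-enabling preemption before fs_usage is dereferenced is safe here only because, as the comment says, nothing else touches c->usage at this point. A minimal sketch of the general pattern, with hypothetical names (example_usage, example_account):

    #include <linux/types.h>
    #include <linux/percpu.h>
    #include <linux/preempt.h>

    /* Hypothetical per-cpu counter, not part of bcachefs. */
    static DEFINE_PER_CPU(u64, example_usage);

    static void example_account(u64 v)
    {
            u64 *p;

            /*
             * this_cpu_ptr() wants preemption disabled: otherwise the task
             * may migrate and the pointer would refer to another CPU's copy,
             * and CONFIG_DEBUG_PREEMPT complains about the lookup itself.
             */
            preempt_disable();
            p = this_cpu_ptr(&example_usage);
            *p += v;
            preempt_enable();
    }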
@@ -513,8 +518,12 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                             size_t b, bool owned_by_allocator,
                             struct gc_pos pos, unsigned flags)
 {
+        preempt_disable();
+
         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
                    ca, b, owned_by_allocator);
+
+        preempt_enable();
 }
 
 static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
@@ -835,8 +835,6 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
         if (c) {
                 percpu_down_read(&c->mark_lock);
                 spin_lock(&c->journal.lock);
-        } else {
-                preempt_disable();
         }
 
         pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
@@ -866,8 +864,6 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
         if (c) {
                 spin_unlock(&c->journal.lock);
                 percpu_up_read(&c->mark_lock);
-        } else {
-                preempt_enable();
         }
 
         if (!new_fs)
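These last two hunks simply drop the else branches: when __bch2_set_nr_journal_buckets() runs without a filesystem instance (c == NULL, i.e. during format or initial device setup) there is nothing to lock, and presumably the preempt_disable() fallback only existed for the benefit of per-cpu accesses in the marking path, which bch2_mark_alloc_bucket() now handles itself (see above). A sketch of the remaining shape, using a hypothetical example_fs type and example_resize_journal() rather than the real struct bch_fs:

    #include <linux/percpu-rwsem.h>
    #include <linux/spinlock.h>

    /* Hypothetical stand-in for the filesystem instance; not bcachefs code. */
    struct example_fs {
            struct percpu_rw_semaphore      mark_lock;      /* percpu_init_rwsem() at setup */
            spinlock_t                      journal_lock;   /* spin_lock_init() at setup */
    };

    static void example_resize_journal(struct example_fs *c)
    {
            /* Only a live filesystem has other threads to exclude. */
            if (c) {
                    percpu_down_read(&c->mark_lock);
                    spin_lock(&c->journal_lock);
            }

            /* ... rearrange journal buckets ... */

            if (c) {
                    spin_unlock(&c->journal_lock);
                    percpu_up_read(&c->mark_lock);
            }
    }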