Commit a1dd428b authored by Brian Foster, committed by Kent Overstreet

bcachefs: push rcu lock down into bch2_target_to_mask()

We have one caller that cycles the rcu lock solely for this call
(via target_rw_devs()), and we'd like to add another. Simplify
things by pushing the rcu lock down into bch2_target_to_mask(),
similar to how bch2_dev_in_target() works.
Signed-off-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent fec4fc82
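For context on the "similar to how bch2_dev_in_target() works" comparison above, the following is a rough, simplified sketch of that helper (reconstructed for illustration; the exact code lives in fs/bcachefs/disk_groups.c and may differ in detail). The relevant point is that it takes and drops the RCU read lock internally, so only a plain bool ever escapes to the caller:

/*
 * Rough sketch only, not the exact bcachefs implementation: the RCU
 * read lock is acquired and released inside the helper, so callers
 * never have to manage it themselves.
 */
bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
{
	struct target t = target_decode(target);

	switch (t.type) {
	case TARGET_NULL:
		return false;
	case TARGET_DEV:
		return dev == t.dev;
	case TARGET_GROUP: {
		struct bch_disk_groups_cpu *g;
		bool ret = false;

		rcu_read_lock();
		g = rcu_dereference(c->disk_groups);
		if (g && t.group < g->nr && !g->entries[t.group].deleted)
			ret = test_bit(dev, g->entries[t.group].devs.d);
		rcu_read_unlock();
		return ret;
	}
	default:
		BUG();
	}
}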
fs/bcachefs/alloc_foreground.c
@@ -934,9 +934,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 	unsigned i;
 	int ret;
 
-	rcu_read_lock();
 	devs = target_rw_devs(c, wp->data_type, target);
-	rcu_read_unlock();
 
 	/* Don't allocate from devices we already have pointers to: */
 	for (i = 0; i < devs_have->nr; i++)
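The call site above goes through target_rw_devs(), roughly the following inline helper (reconstructed for illustration; the real definition is in fs/bcachefs/disk_groups.h and may differ in detail). It builds a by-value copy of the per-data-type RW device mask and ANDs it with the mask returned by bch2_target_to_mask(), which is the call the removed rcu_read_lock()/rcu_read_unlock() pair was bracketing; once the locking moves into bch2_target_to_mask() itself, the bracket at the call site becomes unnecessary:

/*
 * Approximate reconstruction of the inline helper, for illustration
 * only: the result is returned by value, and the RCU-sensitive lookup
 * is the bch2_target_to_mask() call.
 */
static inline struct bch_devs_mask target_rw_devs(struct bch_fs *c,
						  enum bch_data_type data_type,
						  u16 target)
{
	struct bch_devs_mask devs = c->rw_devs[data_type];
	const struct bch_devs_mask *t = bch2_target_to_mask(c, target);

	if (t)
		bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
	return devs;
}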
fs/bcachefs/disk_groups.c
@@ -208,26 +208,36 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
 const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
 {
 	struct target t = target_decode(target);
+	struct bch_devs_mask *devs;
+
+	rcu_read_lock();
 
 	switch (t.type) {
 	case TARGET_NULL:
-		return NULL;
+		devs = NULL;
+		break;
 	case TARGET_DEV: {
 		struct bch_dev *ca = t.dev < c->sb.nr_devices
 			? rcu_dereference(c->devs[t.dev])
 			: NULL;
-		return ca ? &ca->self : NULL;
+		devs = ca ? &ca->self : NULL;
+		break;
 	}
 	case TARGET_GROUP: {
 		struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
 
-		return g && t.group < g->nr && !g->entries[t.group].deleted
+		devs = g && t.group < g->nr && !g->entries[t.group].deleted
 			? &g->entries[t.group].devs
 			: NULL;
+		break;
 	}
 	default:
 		BUG();
 	}
+
+	rcu_read_unlock();
+	return devs;
 }
 
 bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
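Finally, a hypothetical sketch (the function name is invented, not from the tree) of the kind of additional caller the commit message alludes to: with the lock pushed down, a new call site needs no RCU bracketing of its own.

/*
 * Hypothetical example only -- not from the bcachefs tree.  A caller
 * that only needs to know whether a target resolves to a device mask
 * can now call bch2_target_to_mask() directly, with no explicit
 * rcu_read_lock()/rcu_read_unlock() around it.
 */
static bool bch2_target_resolves(struct bch_fs *c, unsigned target)
{
	return bch2_target_to_mask(c, target) != NULL;
}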