Commit c281db0f authored by Kent Overstreet

bcachefs: mark_superblock cleanup

Consolidate mark_superblock() and trans_mark_superblock(), like we did
with the other trigger paths.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent ba665494
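
In short, the consolidation leaves a single entry point that takes the trigger flags and dispatches to either the gc or the transactional marking code. A minimal sketch of that entry point, paraphrasing the buckets.c hunk below (all names are from the patch itself; error checks trimmed):

  int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				      struct bch_dev *ca, u64 b,
				      enum bch_data_type type,
				      unsigned sectors, unsigned flags)
  {
	/* Backup superblock might be past the end of our normal usable space: */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_GC)
		/* gc: account directly against the in-memory gc bucket */
		return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_TRANSACTIONAL)
		/* runtime: update the alloc btree via a transaction commit */
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
  }

Callers (bch2_mark_superblocks() in gc, bch2_trans_mark_dev_sb()/bch2_trans_mark_dev_sbs() at runtime) pass the appropriate flag through, so gc now reuses the same superblock/journal walking code instead of keeping its own copy.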
@@ -1040,59 +1040,14 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial)
return ret;
}
static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
u64 start, u64 end,
enum bch_data_type type,
unsigned flags)
{
u64 b = sector_to_bucket(ca, start);
do {
unsigned sectors =
min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
bch2_mark_metadata_bucket(c, ca, b, type, sectors,
gc_phase(GC_PHASE_SB), flags);
b++;
start += sectors;
} while (start < end);
}
static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
unsigned flags)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
unsigned i;
u64 b;
for (i = 0; i < layout->nr_superblocks; i++) {
u64 offset = le64_to_cpu(layout->sb_offset[i]);
if (offset == BCH_SB_SECTOR)
mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
BCH_DATA_sb, flags);
mark_metadata_sectors(c, ca, offset,
offset + (1 << layout->sb_max_size_bits),
BCH_DATA_sb, flags);
}
for (i = 0; i < ca->journal.nr; i++) {
b = ca->journal.buckets[i];
bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB), flags);
}
}
static void bch2_mark_superblocks(struct bch_fs *c)
static int bch2_mark_superblocks(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
gc_pos_set(c, gc_phase(GC_PHASE_SB));
for_each_online_member(c, ca)
bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_GC);
mutex_unlock(&c->sb_lock);
return ret;
}
static void bch2_gc_free(struct bch_fs *c)
@@ -1635,7 +1590,8 @@ static int bch2_gc(struct bch_fs *c, bool initial)
again:
gc_pos_set(c, gc_phase(GC_PHASE_START));
bch2_mark_superblocks(c);
ret = bch2_mark_superblocks(c);
BUG_ON(ret);
ret = bch2_gc_btrees(c, initial);
if (ret)
@@ -485,59 +485,6 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
return bch2_update_replicas_list(trans, &r.e, sectors);
}
int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, enum bch_data_type data_type,
unsigned sectors, struct gc_pos pos,
unsigned flags)
{
struct bucket old, new, *g;
int ret = 0;
BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(data_type != BCH_DATA_sb &&
data_type != BCH_DATA_journal);
/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;
percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);
bucket_lock(g);
old = *g;
if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
ret = -EIO;
goto err;
}
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
}
g->data_type = data_type;
g->dirty_sectors += sectors;
new = *g;
err:
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, &old, &new);
percpu_up_read(&c->mark_lock);
return ret;
}
int bch2_check_bucket_ref(struct btree_trans *trans,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
@@ -1107,22 +1054,16 @@ int bch2_trigger_reservation(struct btree_trans *trans,
/* Mark superblocks: */
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
struct bch_dev *ca, u64 b,
enum bch_data_type type,
unsigned sectors)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i_alloc_v4 *a;
int ret = 0;
/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;
a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
struct bkey_i_alloc_v4 *a =
bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
if (IS_ERR(a))
return PTR_ERR(a);
@@ -1150,20 +1091,78 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
return ret;
}
static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 b, enum bch_data_type data_type,
unsigned sectors, unsigned flags)
{
struct bucket old, new, *g;
int ret = 0;
percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);
bucket_lock(g);
old = *g;
if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
ret = -EIO;
goto err;
}
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
}
g->data_type = data_type;
g->dirty_sectors += sectors;
new = *g;
err:
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, &old, &new);
percpu_up_read(&c->mark_lock);
return ret;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
struct bch_dev *ca, u64 b,
enum bch_data_type type,
unsigned sectors)
unsigned sectors, unsigned flags)
{
return commit_do(trans, NULL, NULL, 0,
__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
BUG_ON(type != BCH_DATA_free &&
type != BCH_DATA_sb &&
type != BCH_DATA_journal);
/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;
if (flags & BTREE_TRIGGER_GC)
return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
else if (flags & BTREE_TRIGGER_TRANSACTIONAL)
return commit_do(trans, NULL, NULL, 0,
__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
else
BUG();
}
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
struct bch_dev *ca,
u64 start, u64 end,
enum bch_data_type type,
u64 *bucket, unsigned *bucket_sectors)
u64 *bucket, unsigned *bucket_sectors,
unsigned flags)
{
do {
u64 b = sector_to_bucket(ca, start);
@@ -1172,7 +1171,7 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
if (b != *bucket && *bucket_sectors) {
int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
type, *bucket_sectors);
type, *bucket_sectors, flags);
if (ret)
return ret;
@@ -1188,7 +1187,7 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
}
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
struct bch_dev *ca)
struct bch_dev *ca, unsigned flags)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
u64 bucket = 0;
@@ -1201,21 +1200,21 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
if (offset == BCH_SB_SECTOR) {
ret = bch2_trans_mark_metadata_sectors(trans, ca,
0, BCH_SB_SECTOR,
BCH_DATA_sb, &bucket, &bucket_sectors);
BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}
ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
offset + (1 << layout->sb_max_size_bits),
BCH_DATA_sb, &bucket, &bucket_sectors);
BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}
if (bucket_sectors) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
bucket, BCH_DATA_sb, bucket_sectors);
bucket, BCH_DATA_sb, bucket_sectors, flags);
if (ret)
return ret;
}
@@ -1223,7 +1222,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
for (i = 0; i < ca->journal.nr; i++) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
ca->journal.buckets[i],
BCH_DATA_journal, ca->mi.bucket_size);
BCH_DATA_journal, ca->mi.bucket_size, flags);
if (ret)
return ret;
}
@@ -1231,18 +1230,18 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
return 0;
}
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca, unsigned flags)
{
int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
int ret = bch2_trans_run(c,
__bch2_trans_mark_dev_sb(trans, ca, flags));
bch_err_fn(c, ret);
return ret;
}
int bch2_trans_mark_dev_sbs(struct bch_fs *c)
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c, unsigned flags)
{
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca);
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
percpu_ref_put(&ca->ref);
return ret;
@@ -1252,6 +1251,11 @@ int bch2_trans_mark_dev_sbs(struct bch_fs *c)
return 0;
}
int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_TRANSACTIONAL);
}
/* Disk reservations: */
#define SECTORS_CACHE 1024
@@ -337,10 +337,6 @@ int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
const struct bch_extent_ptr *,
s64, enum bch_data_type, u8, u8, u32);
int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s, unsigned);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
@@ -362,9 +358,10 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *);
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
enum bch_data_type, unsigned, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *, unsigned);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *, unsigned);
int bch2_trans_mark_dev_sbs(struct bch_fs *);
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
@@ -946,7 +946,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
ret = bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
ob[nr_got]->bucket, BCH_DATA_journal,
ca->mi.bucket_size));
ca->mi.bucket_size, BTREE_TRIGGER_TRANSACTIONAL));
if (ret) {
bch2_open_bucket_put(c, ob[nr_got]);
bch_err_msg(c, ret, "marking new journal buckets");
@@ -1026,7 +1026,8 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
for (i = 0; i < nr_got; i++)
bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
bu[i], BCH_DATA_free, 0));
bu[i], BCH_DATA_free, 0,
BTREE_TRIGGER_TRANSACTIONAL));
err_free:
if (!new_fs)
for (i = 0; i < nr_got; i++)
@@ -1822,7 +1822,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
bch2_dev_usage_journal_reserve(c);
ret = bch2_trans_mark_dev_sb(c, ca);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
bch_err_msg(ca, ret, "marking new superblock");
if (ret)
goto err_late;
@@ -1887,7 +1887,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
ca = bch_dev_locked(c, dev_idx);
ret = bch2_trans_mark_dev_sb(c, ca);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
if (ret)
goto err;
@@ -1980,7 +1980,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (ret)
goto err;
ret = bch2_trans_mark_dev_sb(c, ca);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_TRANSACTIONAL);
if (ret)
goto err;