Commit 079663d8 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill metadata only gc

This was useful before we had transactional updates to interior btree
nodes - but now, it's just extra unneeded complexity.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b7cf4bd7
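
Note: with the metadata_only flag gone, gc depth now depends only on expensive debug checks and on whether a btree's leaf keys need gc. Below is a minimal standalone sketch of that remaining selection logic; `bch2_expensive_debug_checks` and `btree_node_type_needs_gc` here are illustrative stand-ins for the real bcachefs definitions, and the stub values are assumptions for this sketch only.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the bcachefs helpers referenced in the diff below;
 * the values are illustrative, not the real definitions. */
static bool bch2_expensive_debug_checks = false;

static bool btree_node_type_needs_gc(int btree_id)
{
	/* In bcachefs this depends on the btree type; assume id 0 does. */
	return btree_id == 0;
}

int main(void)
{
	for (int btree_id = 0; btree_id < 2; btree_id++) {
		/* The depth selection left after this patch: walk every
		 * level (depth 0) under expensive debug checks or when the
		 * btree's leaf keys need gc; otherwise stop at depth 1,
		 * i.e. interior nodes only. */
		unsigned depth = bch2_expensive_debug_checks	? 0
			: !btree_node_type_needs_gc(btree_id)	? 1
			: 0;

		printf("btree %d: gc to depth %u\n", btree_id, depth);
	}
	return 0;
}
```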
@@ -205,13 +205,12 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
 }
 
 static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
-			 bool initial, bool metadata_only)
+			 bool initial)
 {
 	struct btree_trans trans;
 	struct btree_iter *iter;
 	struct btree *b;
-	unsigned depth = metadata_only			? 1
-		: bch2_expensive_debug_checks		? 0
+	unsigned depth = bch2_expensive_debug_checks	? 0
 		: !btree_node_type_needs_gc(btree_id)	? 1
 		: 0;
 	u8 max_stale = 0;
@@ -326,12 +325,10 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 
 static int bch2_gc_btree_init(struct bch_fs *c,
 			      struct journal_keys *journal_keys,
-			      enum btree_id btree_id,
-			      bool metadata_only)
+			      enum btree_id btree_id)
 {
 	struct btree *b;
-	unsigned target_depth = metadata_only		? 1
-		: bch2_expensive_debug_checks		? 0
+	unsigned target_depth = bch2_expensive_debug_checks ? 0
 		: !btree_node_type_needs_gc(btree_id)	? 1
 		: 0;
 	u8 max_stale = 0;
@@ -377,7 +374,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
 }
 
 static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
-			  bool initial, bool metadata_only)
+			  bool initial)
 {
 	enum btree_id ids[BTREE_ID_NR];
 	unsigned i;
@@ -390,8 +387,8 @@ static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
 		enum btree_id id = ids[i];
 		int ret = initial
 			? bch2_gc_btree_init(c, journal_keys,
-					     id, metadata_only)
-			: bch2_gc_btree(c, id, initial, metadata_only);
+					     id)
+			: bch2_gc_btree(c, id, initial);
 		if (ret)
 			return ret;
 	}
@@ -558,11 +555,10 @@ static void bch2_gc_free(struct bch_fs *c)
 }
 
 static int bch2_gc_done(struct bch_fs *c,
-			bool initial, bool metadata_only)
+			bool initial)
 {
 	struct bch_dev *ca;
-	bool verify = !metadata_only &&
-		(!initial ||
-		 (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
+	bool verify = (!initial ||
+		       (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
 	unsigned i;
 	int ret = 0;
@@ -601,7 +597,7 @@ static int bch2_gc_done(struct bch_fs *c,
 #define copy_fs_field(_f, _msg, ...)					\
 	copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
 
-	if (!metadata_only) {
+	{
 		struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
 		struct stripe *dst, *src;
@@ -660,8 +656,6 @@ static int bch2_gc_done(struct bch_fs *c,
 		copy_fs_field(hidden,	"hidden");
 		copy_fs_field(btree,	"btree");
 
-		if (!metadata_only) {
 		copy_fs_field(data,	"data");
 		copy_fs_field(cached,	"cached");
 		copy_fs_field(reserved,	"reserved");
@@ -670,18 +664,12 @@ static int bch2_gc_done(struct bch_fs *c,
 		for (i = 0; i < BCH_REPLICAS_MAX; i++)
 			copy_fs_field(persistent_reserved[i],
 				      "persistent_reserved[%i]", i);
-	}
 
 	for (i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
 		char buf[80];
 
-		if (metadata_only &&
-		    (e->data_type == BCH_DATA_user ||
-		     e->data_type == BCH_DATA_cached))
-			continue;
-
 		bch2_replicas_entry_to_text(&PBUF(buf), e);
 		copy_fs_field(replicas[i], "%s", buf);
@@ -697,8 +685,7 @@ static int bch2_gc_done(struct bch_fs *c,
 	return ret;
 }
 
-static int bch2_gc_start(struct bch_fs *c,
-			 bool metadata_only)
+static int bch2_gc_start(struct bch_fs *c)
 {
 	struct bch_dev *ca;
 	unsigned i;
@@ -762,13 +749,6 @@ static int bch2_gc_start(struct bch_fs *c,
 			d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
 			d->gen_valid = s->gen_valid;
-
-			if (metadata_only &&
-			    (s->mark.data_type == BCH_DATA_user ||
-			     s->mark.data_type == BCH_DATA_cached)) {
-				d->_mark = s->mark;
-				d->_mark.owned_by_allocator = 0;
-			}
 		}
 	};
@@ -796,7 +776,7 @@ static int bch2_gc_start(struct bch_fs *c,
  * uses, GC could skip past them
  */
 int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
-	    bool initial, bool metadata_only)
+	    bool initial)
 {
 	struct bch_dev *ca;
 	u64 start_time = local_clock();
@@ -812,13 +792,13 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
 	closure_wait_event(&c->btree_interior_update_wait,
 			   !bch2_btree_interior_updates_nr_pending(c));
 again:
-	ret = bch2_gc_start(c, metadata_only);
+	ret = bch2_gc_start(c);
 	if (ret)
 		goto out;
 
 	bch2_mark_superblocks(c);
 
-	ret = bch2_gc_btrees(c, journal_keys, initial, metadata_only);
+	ret = bch2_gc_btrees(c, journal_keys, initial);
 	if (ret)
 		goto out;
@@ -857,7 +837,7 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
 		bch2_journal_block(&c->journal);
 		percpu_down_write(&c->mark_lock);
-		ret = bch2_gc_done(c, initial, metadata_only);
+		ret = bch2_gc_done(c, initial);
 
 		bch2_journal_unblock(&c->journal);
 	} else {
...
@@ -7,7 +7,7 @@
 void bch2_coalesce(struct bch_fs *);
 
 struct journal_keys;
-int bch2_gc(struct bch_fs *, struct journal_keys *, bool, bool);
+int bch2_gc(struct bch_fs *, struct journal_keys *, bool);
 int bch2_gc_gens(struct bch_fs *);
 void bch2_gc_thread_stop(struct bch_fs *);
 int bch2_gc_thread_start(struct bch_fs *);
...
@@ -1099,27 +1099,13 @@ int bch2_fs_recovery(struct bch_fs *c)
 
 	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
 
-	if ((c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) &&
-	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA))) {
-		/*
-		 * interior btree node updates aren't consistent with the
-		 * journal; after an unclean shutdown we have to walk all
-		 * pointers to metadata:
-		 */
-		bch_info(c, "starting metadata mark and sweep");
-		err = "error in mark and sweep";
-		ret = bch2_gc(c, &c->journal_keys, true, true);
-		if (ret)
-			goto err;
-		bch_verbose(c, "mark and sweep done");
-	}
-
 	if (c->opts.fsck ||
 	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
+	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
 	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
 		bch_info(c, "starting mark and sweep");
 		err = "error in mark and sweep";
-		ret = bch2_gc(c, &c->journal_keys, true, false);
+		ret = bch2_gc(c, &c->journal_keys, true);
 		if (ret)
 			goto err;
 		bch_verbose(c, "mark and sweep done");
...
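
On the recovery side, the separate "metadata mark and sweep" pass is gone; a missing BCH_COMPAT_FEAT_ALLOC_METADATA compat bit now simply triggers the one full mark and sweep. A small self-contained sketch of the resulting decision, with assumed compat-bit values (the real definitions live in the bcachefs superblock headers):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the superblock compat bits named in the hunk
 * above; the numeric values are assumptions for this sketch only. */
#define BCH_COMPAT_FEAT_ALLOC_INFO	0
#define BCH_COMPAT_FEAT_ALLOC_METADATA	1

/* After this patch, recovery runs one full mark and sweep whenever fsck is
 * requested, either alloc compat bit is missing, or replicas need
 * rebuilding; there is no metadata-only variant anymore. */
static bool needs_mark_and_sweep(bool fsck, unsigned long long compat,
				 bool rebuild_replicas)
{
	return fsck ||
	       !(compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
	       !(compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
	       rebuild_replicas;
}

int main(void)
{
	/* Clean superblock with both alloc bits set: no gc pass at mount. */
	unsigned long long compat = (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO) |
				    (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA);

	printf("clean mount: %d\n",
	       needs_mark_and_sweep(false, compat, false));

	/* A missing ALLOC_METADATA bit now triggers the full pass. */
	printf("no ALLOC_METADATA: %d\n",
	       needs_mark_and_sweep(false,
				    1ULL << BCH_COMPAT_FEAT_ALLOC_INFO,
				    false));
	return 0;
}
```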