Commit 260af156 authored by Kent Overstreet

bcachefs: Kill alloc_v4.fragmentation_lru

The fragmentation_lru field hasn't been needed since we reworked the LRU
btrees to use the btree write buffer; previously it was used to resolve
collisions, but the revised LRU btree uses the backpointer (the bucket)
as part of the key.

It should have been deleted at the time of the LRU rework; since it
wasn't, it left places for bugs to hide in the check/repair code.

This fixes LRU fsck on a filesystem image helpfully provided by a user
who disappeared before I could get his name for the reported-by.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 01bf5e3b
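For readers unfamiliar with the LRU rework the commit message refers to: the key of a fragmentation-LRU entry now identifies the bucket itself, so two buckets that happen to share the same fragmentation value occupy distinct keys and never collide. The sketch below is illustrative only; the names (example_bpos, example_lru_pos) are stand-ins, not the actual bcachefs definitions, and the exact bit layout is an assumption.

#include <stdint.h>

/*
 * Illustrative sketch (NOT the bcachefs implementation): pack the LRU id
 * and the LRU "time" (for the fragmentation LRU, the fragmentation index)
 * into the inode field of a btree position, and put the device:bucket in
 * the offset field.  Because the bucket is part of the key, a cached
 * per-bucket fragmentation_lru value is not needed to disambiguate
 * entries with equal times.
 */
struct example_bpos {
    uint64_t inode;
    uint64_t offset;
};

static inline struct example_bpos example_lru_pos(uint16_t lru_id,
                                                  uint64_t dev_bucket,
                                                  uint64_t time)
{
    return (struct example_bpos) {
        /* assumed split: high 16 bits = LRU id, low 48 bits = time */
        .inode  = ((uint64_t) lru_id << 48) | (time & ((1ULL << 48) - 1)),
        .offset = dev_bucket,
    };
}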
@@ -332,7 +332,6 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 	a->io_time[1] = swab64(a->io_time[1]);
 	a->stripe = swab32(a->stripe);
 	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
-	a->fragmentation_lru = swab64(a->fragmentation_lru);
 	a->stripe_sectors = swab32(a->stripe_sectors);

 	bps = alloc_v4_backpointers(a);
@@ -347,6 +346,7 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 {
 	struct bch_alloc_v4 _a;
 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
+	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

 	prt_newline(out);
 	printbuf_indent_add(out, 2);
@@ -364,9 +364,13 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
 	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
 	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
-	prt_printf(out, "fragmentation %llu\n", a->fragmentation_lru);
+	if (ca)
+		prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
 	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
 	printbuf_indent_sub(out, 2);
+
+	bch2_dev_put(ca);
 }

 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
@@ -882,12 +886,13 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			goto err;
 		}

-		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
-		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+		old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
+		new_lru = alloc_lru_idx_fragmentation(*new_a, ca);
+		if (old_lru != new_lru) {
 			ret = bch2_lru_change(trans,
 					BCH_LRU_FRAGMENTATION_START,
 					bucket_to_u64(new.k->p),
-					old_a->fragmentation_lru, new_a->fragmentation_lru);
+					old_lru, new_lru);
 			if (ret)
 				goto err;
 		}
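In the hunk above, the trigger recomputes both the old and new fragmentation indices from the alloc keys via alloc_lru_idx_fragmentation() and only touches the LRU btree when they differ. As a rough sketch of the idea only (the real helper's signature and scaling are not reproduced here), such an index can be derived on demand from a bucket's dirty sectors and size:

#include <stdint.h>

/*
 * Hypothetical stand-in for an on-demand fragmentation index (illustration
 * only, not the bcachefs helper): 0 means "not a copygc candidate",
 * otherwise the value scales the bucket's dirty sectors into a fixed range
 * so that buckets of different sizes are comparable.
 */
static inline uint64_t example_fragmentation_idx(uint64_t dirty_sectors,
                                                 uint64_t bucket_size,
                                                 int data_type_is_movable)
{
    if (!data_type_is_movable || !bucket_size)
        return 0;

    return (dirty_sectors << 31) / bucket_size;
}

Since the index is a pure function of the alloc key (plus the device's bucket size), caching it in bch_alloc_v4 only created a second copy that could go stale, which is what the check/repair code previously had to reconcile.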
@@ -1629,18 +1634,22 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 	if (ret)
 		return ret;

+	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
+	if (!ca)
+		return 0;
+
 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
-	if (a->fragmentation_lru) {
+
+	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
+	if (lru_idx) {
 		ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
-					 a->fragmentation_lru,
-					 alloc_k, last_flushed);
+					 lru_idx, alloc_k, last_flushed);
 		if (ret)
-			return ret;
+			goto err;
 	}

 	if (a->data_type != BCH_DATA_cached)
-		return 0;
+		goto err;

 	if (fsck_err_on(!a->io_time[READ],
 			trans, alloc_key_cached_but_read_time_zero,
@@ -1669,6 +1678,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 		goto err;
 err:
 fsck_err:
+	bch2_dev_put(ca);
 	printbuf_exit(&buf);
 	return ret;
 }
@@ -70,7 +70,7 @@ struct bch_alloc_v4 {
 	__u32		stripe;
 	__u32		nr_external_backpointers;
 	/* end of fields in original version of alloc_v4 */
-	__u64		fragmentation_lru;
+	__u64		_fragmentation_lru; /* obsolete */
 	__u32		stripe_sectors;
 	__u32		pad;
 } __packed __aligned(8);
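Note that the field is renamed to _fragmentation_lru and marked obsolete rather than being dropped, which keeps the on-disk layout of bch_alloc_v4 unchanged for existing filesystems. A purely illustrative, self-contained example of that invariant, using a simplified stand-in struct rather than the real one:

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the tail of the struct (illustration only). */
struct example_alloc_tail {
    uint64_t _fragmentation_lru;    /* obsolete, slot kept for layout */
    uint32_t stripe_sectors;
    uint32_t pad;
} __attribute__((__packed__, __aligned__(8)));

/* Renaming (rather than deleting) the field leaves later offsets intact. */
_Static_assert(offsetof(struct example_alloc_tail, stripe_sectors) ==
               sizeof(uint64_t), "later fields must not move");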
@@ -828,8 +828,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 		return ret;
 	}

-	gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);
-
 	if (fsck_err_on(new.data_type != gc.data_type,
 			trans, alloc_key_data_type_wrong,
 			"bucket %llu:%llu gen %u has wrong data_type"
@@ -857,7 +855,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
 	copy_bucket_field(alloc_key_stripe_wrong, stripe);
 	copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
-	copy_bucket_field(alloc_key_fragmentation_lru_wrong, fragmentation_lru);
 #undef copy_bucket_field

 	if (!bch2_alloc_v4_cmp(*old, new))
@@ -133,7 +133,9 @@ static int bch2_check_lru_key(struct btree_trans *trans,
 	u64 idx;
 	int ret;

-	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos),
+	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_pos);
+
+	if (fsck_err_on(!ca,
 			trans, lru_entry_to_invalid_bucket,
 			"lru key points to nonexistent device:bucket %llu:%llu",
 			alloc_pos.inode, alloc_pos.offset))
@@ -151,7 +153,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
 		idx = alloc_lru_idx_read(*a);
 		break;
 	case BCH_LRU_fragmentation:
-		idx = a->fragmentation_lru;
+		idx = alloc_lru_idx_fragmentation(*a, ca);
 		break;
 	}
@@ -174,6 +176,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
 err:
 fsck_err:
 	bch2_trans_iter_exit(trans, &iter);
+	bch2_dev_put(ca);
 	printbuf_exit(&buf2);
 	printbuf_exit(&buf1);
 	return ret;
@@ -692,7 +692,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
 	a = bch2_alloc_to_v4(k, &a_convert);
 	dirty_sectors = bch2_bucket_sectors_dirty(*a);
 	bucket_size = ca->mi.bucket_size;
-	fragmentation = a->fragmentation_lru;
+	fragmentation = alloc_lru_idx_fragmentation(*a, ca);

 	ret = bch2_btree_write_buffer_tryflush(trans);
 	bch_err_msg(c, ret, "flushing btree write buffer");
@@ -73,6 +73,7 @@ move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
 static int bch2_bucket_is_movable(struct btree_trans *trans,
 				  struct move_bucket *b, u64 time)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct bch_alloc_v4 _a;
@@ -90,14 +91,19 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 	if (ret)
 		return ret;

+	struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+	if (!ca)
+		goto out;
+
 	a = bch2_alloc_to_v4(k, &_a);
 	b->k.gen = a->gen;
 	b->sectors = bch2_bucket_sectors_dirty(*a);
+	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);

-	ret = data_type_movable(a->data_type) &&
-		a->fragmentation_lru &&
-		a->fragmentation_lru <= time;
+	ret = lru_idx && lru_idx <= time;

+	bch2_dev_put(ca);
+out:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
@@ -115,8 +115,8 @@ enum bch_fsck_flags {
 	x(alloc_key_data_type_inconsistency,	101,	0)		\
 	x(alloc_key_to_missing_dev_bucket,	102,	0)		\
 	x(alloc_key_cached_inconsistency,	103,	0)		\
-	x(alloc_key_cached_but_read_time_zero,	104,	0)		\
-	x(alloc_key_to_missing_lru_entry,	105,	0)		\
+	x(alloc_key_cached_but_read_time_zero,	104,	FSCK_AUTOFIX)	\
+	x(alloc_key_to_missing_lru_entry,	105,	FSCK_AUTOFIX)	\
 	x(alloc_key_data_type_wrong,		106,	FSCK_AUTOFIX)	\
 	x(alloc_key_gen_wrong,			107,	FSCK_AUTOFIX)	\
 	x(alloc_key_dirty_sectors_wrong,	108,	FSCK_AUTOFIX)	\