Commit 8777210b authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: refactor key marking code a bit

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2ecc6171
...@@ -232,12 +232,12 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, ...@@ -232,12 +232,12 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
bch2_verify_btree_nr_keys(b); bch2_verify_btree_nr_keys(b);
gc_pos_set(c, gc_pos_btree_node(b));
ret = btree_gc_mark_node(c, b, &max_stale, initial); ret = btree_gc_mark_node(c, b, &max_stale, initial);
if (ret) if (ret)
break; break;
gc_pos_set(c, gc_pos_btree_node(b));
if (!initial) { if (!initial) {
if (max_stale > 64) if (max_stale > 64)
bch2_btree_node_rewrite(c, &iter, bch2_btree_node_rewrite(c, &iter,
...@@ -623,10 +623,13 @@ static void bch2_gc_done(struct bch_fs *c, bool initial) ...@@ -623,10 +623,13 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
"persistent_reserved[%i]", i); "persistent_reserved[%i]", i);
for (i = 0; i < c->replicas.nr; i++) { for (i = 0; i < c->replicas.nr; i++) {
/* struct bch_replicas_entry *e =
* XXX: print out replicas entry cpu_replicas_entry(&c->replicas, i);
*/ char buf[80];
copy_fs_field(data[i], "data[%i]", i);
bch2_replicas_entry_to_text(&PBUF(buf), e);
copy_fs_field(data[i], "%s", buf);
} }
} }
......
...@@ -398,9 +398,22 @@ static inline void update_cached_sectors(struct bch_fs *c, ...@@ -398,9 +398,22 @@ static inline void update_cached_sectors(struct bch_fs *c,
update_replicas(c, fs_usage, &r.e, sectors); update_replicas(c, fs_usage, &r.e, sectors);
} }
static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, #define do_mark_fn(fn, c, pos, flags, ...) \
size_t b, struct bucket_mark *ret, ({ \
bool gc) int gc, ret = 0; \
\
percpu_rwsem_assert_held(&c->mark_lock); \
\
for (gc = 0; gc < 2 && !ret; gc++) \
if (!gc == !(flags & BCH_BUCKET_MARK_GC) || \
(gc && gc_visited(c, pos))) \
ret = fn(c, __VA_ARGS__, gc); \
ret; \
})
static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *ret,
bool gc)
{ {
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]); struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc); struct bucket *g = __bucket(ca, b, gc);
...@@ -421,28 +434,25 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -421,28 +434,25 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
update_cached_sectors(c, fs_usage, ca->dev_idx, update_cached_sectors(c, fs_usage, ca->dev_idx,
-old.cached_sectors); -old.cached_sectors);
if (ret) if (!gc)
*ret = old; *ret = old;
return 0;
} }
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old) size_t b, struct bucket_mark *old)
{ {
percpu_rwsem_assert_held(&c->mark_lock); do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
ca, b, old);
__bch2_invalidate_bucket(c, ca, b, old, false);
if (gc_visited(c, gc_phase(GC_PHASE_START)))
__bch2_invalidate_bucket(c, ca, b, NULL, true);
if (!old->owned_by_allocator && old->cached_sectors) if (!old->owned_by_allocator && old->cached_sectors)
trace_invalidate(ca, bucket_to_sector(ca, b), trace_invalidate(ca, bucket_to_sector(ca, b),
old->cached_sectors); old->cached_sectors);
} }
static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator, size_t b, bool owned_by_allocator,
bool gc) bool gc)
{ {
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]); struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc); struct bucket *g = __bucket(ca, b, gc);
...@@ -454,20 +464,16 @@ static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -454,20 +464,16 @@ static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
BUG_ON(!gc && BUG_ON(!gc &&
!owned_by_allocator && !old.owned_by_allocator); !owned_by_allocator && !old.owned_by_allocator);
return 0;
} }
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator, size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags) struct gc_pos pos, unsigned flags)
{ {
percpu_rwsem_assert_held(&c->mark_lock); do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
ca, b, owned_by_allocator);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
if ((flags & BCH_BUCKET_MARK_GC) ||
gc_visited(c, pos))
__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, true);
} }
#define checked_add(a, b) \ #define checked_add(a, b) \
...@@ -477,9 +483,9 @@ do { \ ...@@ -477,9 +483,9 @@ do { \
BUG_ON((a) != _res); \ BUG_ON((a) != _res); \
} while (0) } while (0)
static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, enum bch_data_type type, size_t b, enum bch_data_type type,
unsigned sectors, bool gc) unsigned sectors, bool gc)
{ {
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]); struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc); struct bucket *g = __bucket(ca, b, gc);
...@@ -493,6 +499,8 @@ static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -493,6 +499,8 @@ static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
new.data_type = type; new.data_type = type;
checked_add(new.dirty_sectors, sectors); checked_add(new.dirty_sectors, sectors);
})); }));
return 0;
} }
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
...@@ -506,15 +514,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -506,15 +514,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
preempt_disable(); preempt_disable();
if (likely(c)) { if (likely(c)) {
percpu_rwsem_assert_held(&c->mark_lock); do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
ca, b, type, sectors);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
false);
if ((flags & BCH_BUCKET_MARK_GC) ||
gc_visited(c, pos))
__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
true);
} else { } else {
struct bucket *g; struct bucket *g;
struct bucket_mark new; struct bucket_mark new;
...@@ -833,30 +834,28 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, ...@@ -833,30 +834,28 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
unsigned journal_seq, unsigned flags, unsigned journal_seq, unsigned flags,
bool gc) bool gc)
{ {
int ret = 0; if (!fs_usage || gc)
fs_usage = this_cpu_ptr(c->usage[gc]);
switch (k.k->type) { switch (k.k->type) {
case KEY_TYPE_btree_ptr: case KEY_TYPE_btree_ptr:
ret = bch2_mark_extent(c, k, inserting return bch2_mark_extent(c, k, inserting
? c->opts.btree_node_size ? c->opts.btree_node_size
: -c->opts.btree_node_size, : -c->opts.btree_node_size,
BCH_DATA_BTREE, BCH_DATA_BTREE,
fs_usage, journal_seq, flags, gc); fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_extent: case KEY_TYPE_extent:
ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER, return bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
fs_usage, journal_seq, flags, gc); fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_stripe: case KEY_TYPE_stripe:
ret = bch2_mark_stripe(c, k, inserting, return bch2_mark_stripe(c, k, inserting,
fs_usage, journal_seq, flags, gc); fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_inode: case KEY_TYPE_inode:
if (inserting) if (inserting)
fs_usage->s.nr_inodes++; fs_usage->s.nr_inodes++;
else else
fs_usage->s.nr_inodes--; fs_usage->s.nr_inodes--;
break; return 0;
case KEY_TYPE_reservation: { case KEY_TYPE_reservation: {
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
...@@ -866,13 +865,11 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, ...@@ -866,13 +865,11 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
fs_usage->s.reserved += sectors; fs_usage->s.reserved += sectors;
fs_usage->persistent_reserved[replicas - 1] += sectors; fs_usage->persistent_reserved[replicas - 1] += sectors;
break; return 0;
} }
default: default:
break; return 0;
} }
return ret;
} }
int bch2_mark_key_locked(struct bch_fs *c, int bch2_mark_key_locked(struct bch_fs *c,
...@@ -882,26 +879,9 @@ int bch2_mark_key_locked(struct bch_fs *c, ...@@ -882,26 +879,9 @@ int bch2_mark_key_locked(struct bch_fs *c,
struct bch_fs_usage *fs_usage, struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags) u64 journal_seq, unsigned flags)
{ {
int ret; return do_mark_fn(__bch2_mark_key, c, pos, flags,
k, inserting, sectors, fs_usage,
if (!(flags & BCH_BUCKET_MARK_GC)) { journal_seq, flags);
ret = __bch2_mark_key(c, k, inserting, sectors,
fs_usage ?: this_cpu_ptr(c->usage[0]),
journal_seq, flags, false);
if (ret)
return ret;
}
if ((flags & BCH_BUCKET_MARK_GC) ||
gc_visited(c, pos)) {
ret = __bch2_mark_key(c, k, inserting, sectors,
this_cpu_ptr(c->usage[1]),
journal_seq, flags, true);
if (ret)
return ret;
}
return 0;
} }
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
......
...@@ -249,8 +249,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *, ...@@ -249,8 +249,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned, size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned); struct gc_pos, unsigned);
#define BCH_BUCKET_MARK_NOATOMIC (1 << 0) #define BCH_BUCKET_MARK_GC (1 << 0)
#define BCH_BUCKET_MARK_GC (1 << 1) #define BCH_BUCKET_MARK_NOATOMIC (1 << 1)
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos, bool, s64, struct gc_pos,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment