Commit 6e738539 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Improve key marking interface

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 572ad769
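In short: bch2_mark_key(), bch2_mark_key_locked() and bch2_trans_mark_key() lose their bool argument (insert vs. overwrite), and that information now travels in the flags word via the new BCH_BUCKET_MARK_INSERT, BCH_BUCKET_MARK_OVERWRITE and BCH_BUCKET_MARK_BUCKET_INVALIDATE bits added in buckets.h below. Callers that passed true generally now or in BCH_BUCKET_MARK_INSERT, callers that passed false use BCH_BUCKET_MARK_OVERWRITE, and the alloc/EC read paths use BCH_BUCKET_MARK_ALLOC_READ, as the hunks below show. The standalone sketch here mirrors the new flag layout; pick_mark_flags() and the main() harness are illustrative only, not bcachefs code.

/*
 * Standalone illustration of the new flag-based marking interface.
 * The BCH_BUCKET_MARK_* values mirror the defines this commit adds to
 * buckets.h; pick_mark_flags() is a hypothetical helper, not bcachefs code.
 */
#include <stdbool.h>
#include <stdio.h>

#define BCH_BUCKET_MARK_INSERT                  (1 << 0)
#define BCH_BUCKET_MARK_OVERWRITE               (1 << 1)
#define BCH_BUCKET_MARK_BUCKET_INVALIDATE       (1 << 2)
#define BCH_BUCKET_MARK_GC                      (1 << 3)
#define BCH_BUCKET_MARK_ALLOC_READ              (1 << 4)
#define BCH_BUCKET_MARK_NOATOMIC                (1 << 5)

/*
 * Old interface: a bare bool said whether the key was being inserted or
 * overwritten.  New interface: the same information is carried in the
 * flags word, so extra context (GC pass, bucket invalidate, ...) can be
 * or'd in by the caller.
 */
static unsigned pick_mark_flags(bool inserting, bool gc_pass)
{
        unsigned flags = inserting ? BCH_BUCKET_MARK_INSERT
                                   : BCH_BUCKET_MARK_OVERWRITE;

        if (gc_pass)
                flags |= BCH_BUCKET_MARK_GC;
        return flags;
}

int main(void)
{
        printf("insert:       %#x\n", pick_mark_flags(true, false));
        printf("overwrite+gc: %#x\n", pick_mark_flags(false, true));
        return 0;
}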
@@ -232,9 +232,9 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	bch2_trans_init(&trans, c, 0, 0);
 	for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
-		bch2_mark_key(c, k, true, 0, NULL, 0,
-			      BCH_BUCKET_MARK_NOATOMIC|
-			      BCH_BUCKET_MARK_ALLOC_READ);
+		bch2_mark_key(c, k, 0, NULL, 0,
+			      BCH_BUCKET_MARK_ALLOC_READ|
+			      BCH_BUCKET_MARK_NOATOMIC);
 	ret = bch2_trans_exit(&trans) ?: ret;
 	if (ret) {
@@ -244,10 +244,9 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	for_each_journal_key(*journal_keys, j)
 		if (j->btree_id == BTREE_ID_ALLOC)
-			bch2_mark_key(c, bkey_i_to_s_c(j->k),
-				      true, 0, NULL, 0,
-				      BCH_BUCKET_MARK_NOATOMIC|
-				      BCH_BUCKET_MARK_ALLOC_READ);
+			bch2_mark_key(c, bkey_i_to_s_c(j->k), 0, NULL, 0,
+				      BCH_BUCKET_MARK_ALLOC_READ|
+				      BCH_BUCKET_MARK_NOATOMIC);
 	percpu_down_write(&c->mark_lock);
 	bch2_dev_usage_from_buckets(c);
@@ -953,6 +952,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
 				   BTREE_INSERT_NOFAIL|
 				   BTREE_INSERT_USE_RESERVE|
 				   BTREE_INSERT_USE_ALLOC_RESERVE|
+				   BTREE_INSERT_BUCKET_INVALIDATE|
 				   flags);
 	if (ret == -EINTR)
 		goto retry;
......
@@ -173,7 +173,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 			*max_stale = max(*max_stale, ptr_stale(ca, ptr));
 		}
-	bch2_mark_key(c, k, true, k.k->size, NULL, 0, flags);
+	bch2_mark_key(c, k, k.k->size, NULL, 0, flags);
 fsck_err:
 	return ret;
 }
@@ -420,8 +420,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 	for_each_pending_btree_node_free(c, as, d)
 		if (d->index_update_done)
-			bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-				      true, 0, NULL, 0,
-				      BCH_BUCKET_MARK_GC);
+			bch2_mark_key(c, bkey_i_to_s_c(&d->key), 0, NULL, 0,
+				      BCH_BUCKET_MARK_GC);
 	mutex_unlock(&c->btree_interior_update_lock);
......
@@ -48,6 +48,7 @@ enum {
 	__BTREE_INSERT_NOMARK,
 	__BTREE_INSERT_MARK_INMEM,
 	__BTREE_INSERT_NO_CLEAR_REPLICAS,
+	__BTREE_INSERT_BUCKET_INVALIDATE,
 	__BTREE_INSERT_NOWAIT,
 	__BTREE_INSERT_GC_LOCK_HELD,
 	__BCH_HASH_SET_MUST_CREATE,
@@ -94,6 +95,8 @@ enum {
 #define BTREE_INSERT_NO_CLEAR_REPLICAS	(1 << __BTREE_INSERT_NO_CLEAR_REPLICAS)
+#define BTREE_INSERT_BUCKET_INVALIDATE	(1 << __BTREE_INSERT_BUCKET_INVALIDATE)
 /* Don't block on allocation failure (for new btree nodes: */
 #define BTREE_INSERT_NOWAIT		(1 << __BTREE_INSERT_NOWAIT)
 #define BTREE_INSERT_GC_LOCK_HELD	(1 << __BTREE_INSERT_GC_LOCK_HELD)
......
@@ -194,7 +194,9 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
 			       : gc_pos_btree_root(as->btree_id)) >= 0 &&
 	    gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
-				     false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+				     0, NULL, 0,
+				     BCH_BUCKET_MARK_OVERWRITE|
+				     BCH_BUCKET_MARK_GC);
 }
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
@@ -264,13 +266,13 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
 {
 	BUG_ON(!pending->index_update_done);
-	bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-		      false, 0,
-		      NULL, 0, 0);
+	bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+		      BCH_BUCKET_MARK_OVERWRITE);
 	if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
-		bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-			      false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+		bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+			      BCH_BUCKET_MARK_OVERWRITE|
+			      BCH_BUCKET_MARK_GC);
 }
 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -1075,10 +1077,12 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-			     true, 0, &fs_usage->u, 0, 0);
+			     0, &fs_usage->u, 0,
+			     BCH_BUCKET_MARK_INSERT);
 	if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-				     true, 0, NULL, 0,
+				     0, NULL, 0,
+				     BCH_BUCKET_MARK_INSERT|
 				     BCH_BUCKET_MARK_GC);
 	if (old && !btree_node_fake(old))
@@ -1171,11 +1175,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-			     true, 0, &fs_usage->u, 0, 0);
+			     0, &fs_usage->u, 0,
+			     BCH_BUCKET_MARK_INSERT);
 	if (gc_visited(c, gc_pos_btree_node(b)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-				     true, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+				     0, NULL, 0,
+				     BCH_BUCKET_MARK_INSERT|
+				     BCH_BUCKET_MARK_GC);
 	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
 	       bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
@@ -1996,10 +2003,12 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 		fs_usage = bch2_fs_usage_scratch_get(c);
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-				     true, 0, &fs_usage->u, 0, 0);
+				     0, &fs_usage->u, 0,
+				     BCH_BUCKET_MARK_INSERT);
 		if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
 			bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-					     true, 0, NULL, 0,
+					     0, NULL, 0,
+					     BCH_BUCKET_MARK_INSERT|
 					     BCH_BUCKET_MARK_GC);
 		bch2_btree_node_free_index(as, NULL,
......
@@ -542,6 +542,9 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct bch_fs_usage_online *fs_usage = NULL;
 	struct btree_insert_entry *i;
+	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
+		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
+		: 0;
 	int ret;
 	trans_for_each_update_iter(trans, i)
@@ -618,7 +621,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	trans_for_each_update_iter(trans, i)
 		if (update_has_triggers(trans, i) &&
 		    !update_triggers_transactional(trans, i))
-			bch2_mark_update(trans, i, &fs_usage->u, 0);
+			bch2_mark_update(trans, i, &fs_usage->u, mark_flags);
 	if (fs_usage && trans->fs_usage_deltas)
 		bch2_replicas_delta_list_apply(c, &fs_usage->u,
@@ -632,6 +635,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	trans_for_each_update_iter(trans, i)
 		if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
 			bch2_mark_update(trans, i, NULL,
+					 mark_flags|
 					 BCH_BUCKET_MARK_GC);
 	trans_for_each_update(trans, i)
......
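The do_btree_insert_at() hunks above compute the per-transaction mark flags once (BTREE_INSERT_BUCKET_INVALIDATE becomes BCH_BUCKET_MARK_BUCKET_INVALIDATE) and then reuse them for both the normal and the GC marking pass. A minimal sketch of that translation pattern, with stand-in types and flag values rather than the real bcachefs definitions:

/*
 * Sketch of the flag translation in do_btree_insert_at() above: a
 * transaction-level BTREE_INSERT_* flag is converted once into a
 * BCH_BUCKET_MARK_* bit, then or'd into every mark call, including the
 * GC pass.  Types and flag values here are illustrative stand-ins.
 */
#include <stdio.h>

#define BTREE_INSERT_BUCKET_INVALIDATE          (1 << 0)        /* stand-in transaction flag */
#define BCH_BUCKET_MARK_BUCKET_INVALIDATE       (1 << 2)
#define BCH_BUCKET_MARK_GC                      (1 << 3)

struct fake_trans { unsigned flags; };          /* stand-in for struct btree_trans */

static void mark_update(unsigned flags)         /* stand-in for bch2_mark_update() */
{
        printf("marking with flags %#x\n", flags);
}

static void do_insert(struct fake_trans *trans, int gc_visited)
{
        /* Translate the transaction flag exactly once... */
        unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
                ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
                : 0;

        /* ...then reuse it for the normal pass and the GC pass. */
        mark_update(mark_flags);
        if (gc_visited)
                mark_update(mark_flags | BCH_BUCKET_MARK_GC);
}

int main(void)
{
        struct fake_trans t = { .flags = BTREE_INSERT_BUCKET_INVALIDATE };

        do_insert(&t, 1);
        return 0;
}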
This diff is collapsed.
@@ -249,16 +249,17 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 			       size_t, enum bch_data_type, unsigned,
 			       struct gc_pos, unsigned);
-#define BCH_BUCKET_MARK_GC			(1 << 0)
-#define BCH_BUCKET_MARK_NOATOMIC		(1 << 1)
-#define BCH_BUCKET_MARK_ALLOC_READ		(1 << 2)
-
-int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
-			 bool, s64, struct bch_fs_usage *,
-			 u64, unsigned);
-int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
-		  bool, s64, struct bch_fs_usage *,
-		  u64, unsigned);
+#define BCH_BUCKET_MARK_INSERT			(1 << 0)
+#define BCH_BUCKET_MARK_OVERWRITE		(1 << 1)
+#define BCH_BUCKET_MARK_BUCKET_INVALIDATE	(1 << 2)
+#define BCH_BUCKET_MARK_GC			(1 << 3)
+#define BCH_BUCKET_MARK_ALLOC_READ		(1 << 4)
+#define BCH_BUCKET_MARK_NOATOMIC		(1 << 5)
+
+int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
+			 struct bch_fs_usage *, u64, unsigned);
+int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
+		  struct bch_fs_usage *, u64, unsigned);
 int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage_online *,
 			struct disk_reservation *, unsigned);
@@ -271,7 +272,7 @@ int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
 void bch2_replicas_delta_list_apply(struct bch_fs *,
 				    struct bch_fs_usage *,
 				    struct replicas_delta_list *);
-int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, bool, s64);
+int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
 int bch2_trans_mark_update(struct btree_trans *,
 			   struct btree_insert_entry *);
 void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
......
@@ -611,17 +611,21 @@ void bch2_stripes_heap_update(struct bch_fs *c,
 	ec_stripes_heap *h = &c->ec_stripes_heap;
 	size_t i;
-	heap_verify_backpointer(c, idx);
-	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
-	i = m->heap_idx;
-	heap_sift_up(h, i, ec_stripes_heap_cmp,
-		     ec_stripes_heap_set_backpointer);
-	heap_sift_down(h, i, ec_stripes_heap_cmp,
-		       ec_stripes_heap_set_backpointer);
-	heap_verify_backpointer(c, idx);
+	if (m->alive) {
+		heap_verify_backpointer(c, idx);
+		h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
+		i = m->heap_idx;
+		heap_sift_up(h, i, ec_stripes_heap_cmp,
+			     ec_stripes_heap_set_backpointer);
+		heap_sift_down(h, i, ec_stripes_heap_cmp,
+			       ec_stripes_heap_set_backpointer);
+		heap_verify_backpointer(c, idx);
+	} else {
+		bch2_stripes_heap_insert(c, m, idx);
+	}
 	if (stripe_idx_to_delete(c) >= 0)
 		schedule_work(&c->ec_stripe_delete_work);
@@ -1274,7 +1278,9 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	bch2_trans_init(&trans, c, 0, 0);
 	for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
-		bch2_mark_key(c, k, true, 0, NULL, 0, 0);
+		bch2_mark_key(c, k, 0, NULL, 0,
+			      BCH_BUCKET_MARK_ALLOC_READ|
+			      BCH_BUCKET_MARK_NOATOMIC);
 	ret = bch2_trans_exit(&trans) ?: ret;
 	if (ret) {
@@ -1285,7 +1291,9 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	for_each_journal_key(*journal_keys, i)
 		if (i->btree_id == BTREE_ID_EC)
 			bch2_mark_key(c, bkey_i_to_s_c(i->k),
-				      true, 0, NULL, 0, 0);
+				      0, NULL, 0,
+				      BCH_BUCKET_MARK_ALLOC_READ|
+				      BCH_BUCKET_MARK_NOATOMIC);
 	return 0;
 }
......
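The bch2_stripes_heap_update() hunk above adds an m->alive guard: stripes already in the heap are re-sifted in place, while stripes not yet in the heap fall back to bch2_stripes_heap_insert(). A minimal stand-in of that update-or-insert shape (fake types and helpers, not the bcachefs heap):

/*
 * Stand-in for the new update-or-insert behaviour of
 * bch2_stripes_heap_update() shown above; only the m->alive guard
 * mirrors the commit, everything else is fake.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_stripe { bool alive; };     /* stand-in for struct stripe */

static void heap_resift(struct fake_stripe *m)
{
        (void) m;
        puts("re-sift existing heap entry");
}

static void heap_insert(struct fake_stripe *m)
{
        m->alive = true;
        puts("insert new heap entry");
}

static void stripes_heap_update(struct fake_stripe *m)
{
        if (m->alive)
                heap_resift(m); /* already in the heap: fix up its position */
        else
                heap_insert(m); /* not in the heap yet: take the insert path */
}

int main(void)
{
        struct fake_stripe s = { .alive = false };

        stripes_heap_update(&s);        /* first call inserts */
        stripes_heap_update(&s);        /* second call re-sifts */
        return 0;
}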
@@ -258,8 +258,9 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
 	} while (bkey_cmp(iter->pos, k->k.p) < 0);
 	if (split_compressed) {
-		ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k), false,
-					  -((s64) k->k.size)) ?:
+		ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
+					  -((s64) k->k.size),
+					  BCH_BUCKET_MARK_OVERWRITE) ?:
 		      bch2_trans_commit(&trans, &disk_res, NULL,
 					BTREE_INSERT_ATOMIC|
 					BTREE_INSERT_NOFAIL|
......