Commit 6820ac2c authored by Kent Overstreet

bcachefs: move bch2_mark_alloc() to alloc_background.c

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6cacd0c4
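
This commit moves bch2_mark_alloc(), the trigger that updates bucket and device accounting when allocation keys change, out of buckets.c and into alloc_background.c alongside the rest of the allocator code. To support the move, bch2_dev_usage_update() loses its static qualifier, gains a declaration in buckets.h, and is converted to take its old and new allocation states as const pointers rather than by value; the bch2_mark_alloc() prototype correspondingly moves from buckets.h to alloc_background.h.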
@@ -843,6 +843,114 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 	return 0;
 }
 
+int bch2_mark_alloc(struct btree_trans *trans,
+		    enum btree_id btree, unsigned level,
+		    struct bkey_s_c old, struct bkey_s new,
+		    unsigned flags)
+{
+	bool gc = flags & BTREE_TRIGGER_GC;
+	u64 journal_seq = trans->journal_res.seq;
+	u64 bucket_journal_seq;
+	struct bch_fs *c = trans->c;
+	struct bch_alloc_v4 old_a_convert, new_a_convert;
+	const struct bch_alloc_v4 *old_a, *new_a;
+	struct bch_dev *ca;
+	int ret = 0;
+
+	/*
+	 * alloc btree is read in by bch2_alloc_read, not gc:
+	 */
+	if ((flags & BTREE_TRIGGER_GC) &&
+	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
+		return 0;
+
+	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+				       "alloc key for invalid device or bucket"))
+		return -EIO;
+
+	ca = bch_dev_bkey_exists(c, new.k->p.inode);
+
+	old_a = bch2_alloc_to_v4(old, &old_a_convert);
+	new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
+
+	bucket_journal_seq = new_a->journal_seq;
+
+	if ((flags & BTREE_TRIGGER_INSERT) &&
+	    data_type_is_empty(old_a->data_type) !=
+	    data_type_is_empty(new_a->data_type) &&
+	    new.k->type == KEY_TYPE_alloc_v4) {
+		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
+
+		EBUG_ON(!journal_seq);
+
+		/*
+		 * If the btree updates referring to a bucket weren't flushed
+		 * before the bucket became empty again, then we don't have
+		 * to wait on a journal flush before we can reuse the bucket:
+		 */
+		v->journal_seq = bucket_journal_seq =
+			data_type_is_empty(new_a->data_type) &&
+			(journal_seq == v->journal_seq ||
+			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+			? 0 : journal_seq;
+	}
+
+	if (!data_type_is_empty(old_a->data_type) &&
+	    data_type_is_empty(new_a->data_type) &&
+	    bucket_journal_seq) {
+		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+				c->journal.flushed_seq_ondisk,
+				new.k->p.inode, new.k->p.offset,
+				bucket_journal_seq);
+		if (ret) {
+			bch2_fs_fatal_error(c,
+				"error setting bucket_needs_journal_commit: %i", ret);
+			return ret;
+		}
+	}
+
+	percpu_down_read(&c->mark_lock);
+	if (!gc && new_a->gen != old_a->gen)
+		*bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+
+	if (gc) {
+		struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+		bucket_lock(g);
+
+		g->gen_valid		= 1;
+		g->gen			= new_a->gen;
+		g->data_type		= new_a->data_type;
+		g->stripe		= new_a->stripe;
+		g->stripe_redundancy	= new_a->stripe_redundancy;
+		g->dirty_sectors	= new_a->dirty_sectors;
+		g->cached_sectors	= new_a->cached_sectors;
+
+		bucket_unlock(g);
+	}
+	percpu_up_read(&c->mark_lock);
+
+	if (new_a->data_type == BCH_DATA_free &&
+	    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
+		closure_wake_up(&c->freelist_wait);
+
+	if (new_a->data_type == BCH_DATA_need_discard &&
+	    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
+		bch2_do_discards(c);
+
+	if (old_a->data_type != BCH_DATA_cached &&
+	    new_a->data_type == BCH_DATA_cached &&
+	    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+		bch2_do_invalidates(c);
+
+	if (new_a->data_type == BCH_DATA_need_gc_gens)
+		bch2_do_gc_gens(c);
+
+	return 0;
+}
+
 /*
  * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
  * extents style btrees, but works on non-extents btrees:
......
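
The heart of the moved function is the journal-sequence handling for buckets that become empty: if the only btree updates referring to the bucket sit in the journal entry being committed right now, or the bucket's recorded sequence otherwise needs no flush wait, its journal_seq is cleared and the bucket may be reused immediately; otherwise the bucket must wait on the current sequence reaching disk (tracked via buckets_waiting_for_journal). A minimal standalone C sketch of that decision follows; the types and the seq_needs_no_flush() helper are simplified stand-ins, not the real bcachefs API:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the journal state; just enough to drive the decision. */
struct journal_state {
	uint64_t flushed_seq_ondisk;	/* newest seq known flushed to disk */
};

/*
 * Stand-in check: treat "already flushed" as "no flush wait needed".
 * The real code instead calls bch2_journal_noflush_seq(), which can
 * also demote a still-in-flight journal entry to a non-flush write.
 */
static bool seq_needs_no_flush(const struct journal_state *j, uint64_t seq)
{
	return seq <= j->flushed_seq_ondisk;
}

/*
 * Mirrors v->journal_seq = bucket_journal_seq = ... in bch2_mark_alloc():
 * returns 0 when the emptied bucket is immediately reusable, otherwise
 * the commit sequence the bucket must wait on before reuse.
 */
static uint64_t bucket_journal_seq_update(const struct journal_state *j,
					  uint64_t bucket_seq,
					  uint64_t commit_seq,
					  bool bucket_now_empty)
{
	if (bucket_now_empty &&
	    (commit_seq == bucket_seq || seq_needs_no_flush(j, bucket_seq)))
		return 0;
	return commit_seq;
}

int main(void)
{
	struct journal_state j = { .flushed_seq_ondisk = 90 };

	/* Last references flushed at seq 80: prints 0, reusable now. */
	printf("%" PRIu64 "\n", bucket_journal_seq_update(&j, 80, 100, true));
	/* Seq 95 not yet flushed: prints 100, reuse waits on that flush. */
	printf("%" PRIu64 "\n", bucket_journal_seq_update(&j, 95, 100, true));
	return 0;
}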
@@ -234,6 +234,8 @@ int bch2_alloc_read(struct bch_fs *);
 int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
 			  struct bkey_s_c, struct bkey_s, unsigned);
+int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
+		    struct bkey_s_c, struct bkey_s, unsigned);
 int bch2_check_alloc_info(struct bch_fs *);
 int bch2_check_alloc_to_lru_refs(struct bch_fs *);
 void bch2_do_discards(struct bch_fs *);
......
@@ -296,9 +296,9 @@ void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
 	}
 }
 
-static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
-				  struct bch_alloc_v4 old,
-				  struct bch_alloc_v4 new,
-				  u64 journal_seq, bool gc)
+void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
+			   const struct bch_alloc_v4 *old,
+			   const struct bch_alloc_v4 *new,
+			   u64 journal_seq, bool gc)
 {
 	struct bch_fs_usage *fs_usage;
@@ -307,24 +307,24 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	preempt_disable();
 	fs_usage = fs_usage_ptr(c, journal_seq, gc);
 
-	if (data_type_is_hidden(old.data_type))
+	if (data_type_is_hidden(old->data_type))
 		fs_usage->hidden -= ca->mi.bucket_size;
-	if (data_type_is_hidden(new.data_type))
+	if (data_type_is_hidden(new->data_type))
 		fs_usage->hidden += ca->mi.bucket_size;
 
 	u = dev_usage_ptr(ca, journal_seq, gc);
 
-	u->d[old.data_type].buckets--;
-	u->d[new.data_type].buckets++;
-	u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
-	u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
-	u->d[BCH_DATA_cached].sectors += new.cached_sectors;
-	u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
-	u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
-	u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);
+	u->d[old->data_type].buckets--;
+	u->d[new->data_type].buckets++;
+	u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
+	u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);
+	u->d[BCH_DATA_cached].sectors += new->cached_sectors;
+	u->d[BCH_DATA_cached].sectors -= old->cached_sectors;
+	u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
+	u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);
 
 	preempt_enable();
 }
@@ -343,10 +343,10 @@ static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
 				    struct bucket old, struct bucket new)
 {
-	bch2_dev_usage_update(c, ca,
-			      bucket_m_to_alloc(old),
-			      bucket_m_to_alloc(new),
-			      0, true);
+	struct bch_alloc_v4 old_a = bucket_m_to_alloc(old);
+	struct bch_alloc_v4 new_a = bucket_m_to_alloc(new);
+
+	bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
 }
 
 static inline int __update_replicas(struct bch_fs *c,
@@ -496,114 +496,6 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
 	return bch2_update_replicas_list(trans, &r.e, sectors);
 }
 
-int bch2_mark_alloc(struct btree_trans *trans,
-		    enum btree_id btree, unsigned level,
-		    struct bkey_s_c old, struct bkey_s new,
-		    unsigned flags)
-{
-	bool gc = flags & BTREE_TRIGGER_GC;
-	u64 journal_seq = trans->journal_res.seq;
-	u64 bucket_journal_seq;
-	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a_convert, new_a_convert;
-	const struct bch_alloc_v4 *old_a, *new_a;
-	struct bch_dev *ca;
-	int ret = 0;
-
-	/*
-	 * alloc btree is read in by bch2_alloc_read, not gc:
-	 */
-	if ((flags & BTREE_TRIGGER_GC) &&
-	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
-		return 0;
-
-	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
-				       "alloc key for invalid device or bucket"))
-		return -EIO;
-
-	ca = bch_dev_bkey_exists(c, new.k->p.inode);
-
-	old_a = bch2_alloc_to_v4(old, &old_a_convert);
-	new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
-
-	bucket_journal_seq = new_a->journal_seq;
-
-	if ((flags & BTREE_TRIGGER_INSERT) &&
-	    data_type_is_empty(old_a->data_type) !=
-	    data_type_is_empty(new_a->data_type) &&
-	    new.k->type == KEY_TYPE_alloc_v4) {
-		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
-
-		EBUG_ON(!journal_seq);
-
-		/*
-		 * If the btree updates referring to a bucket weren't flushed
-		 * before the bucket became empty again, then we don't have
-		 * to wait on a journal flush before we can reuse the bucket:
-		 */
-		v->journal_seq = bucket_journal_seq =
-			data_type_is_empty(new_a->data_type) &&
-			(journal_seq == v->journal_seq ||
-			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
-			? 0 : journal_seq;
-	}
-
-	if (!data_type_is_empty(old_a->data_type) &&
-	    data_type_is_empty(new_a->data_type) &&
-	    bucket_journal_seq) {
-		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
-				c->journal.flushed_seq_ondisk,
-				new.k->p.inode, new.k->p.offset,
-				bucket_journal_seq);
-		if (ret) {
-			bch2_fs_fatal_error(c,
-				"error setting bucket_needs_journal_commit: %i", ret);
-			return ret;
-		}
-	}
-
-	percpu_down_read(&c->mark_lock);
-	if (!gc && new_a->gen != old_a->gen)
-		*bucket_gen(ca, new.k->p.offset) = new_a->gen;
-
-	bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
-
-	if (gc) {
-		struct bucket *g = gc_bucket(ca, new.k->p.offset);
-
-		bucket_lock(g);
-
-		g->gen_valid		= 1;
-		g->gen			= new_a->gen;
-		g->data_type		= new_a->data_type;
-		g->stripe		= new_a->stripe;
-		g->stripe_redundancy	= new_a->stripe_redundancy;
-		g->dirty_sectors	= new_a->dirty_sectors;
-		g->cached_sectors	= new_a->cached_sectors;
-
-		bucket_unlock(g);
-	}
-	percpu_up_read(&c->mark_lock);
-
-	if (new_a->data_type == BCH_DATA_free &&
-	    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
-		closure_wake_up(&c->freelist_wait);
-
-	if (new_a->data_type == BCH_DATA_need_discard &&
-	    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
-		bch2_do_discards(c);
-
-	if (old_a->data_type != BCH_DATA_cached &&
-	    new_a->data_type == BCH_DATA_cached &&
-	    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
-		bch2_do_invalidates(c);
-
-	if (new_a->data_type == BCH_DATA_need_gc_gens)
-		bch2_do_gc_gens(c);
-
-	return 0;
-}
-
 int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 			      size_t b, enum bch_data_type data_type,
 			      unsigned sectors, struct gc_pos pos,
......
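
The buckets.c hunks above also convert bch2_dev_usage_update() from taking struct bch_alloc_v4 by value to taking const pointers, so the now-exported function avoids copying the struct at every call and can be shared with the trigger in alloc_background.c. A sketch of the same calling-convention change on trimmed stand-in types (alloc_v4, dev_usage and the function names below are illustrative, not the kernel definitions):

#include <stdint.h>

/* Trimmed stand-ins for the kernel structs (illustrative only). */
struct alloc_v4 {
	uint8_t  data_type;
	uint32_t dirty_sectors;
};

struct dev_usage {
	uint64_t buckets[16];
	uint64_t sectors[16];
};

/* Before: by-value parameters copy both structs on every call. */
static void usage_update_by_value(struct dev_usage *u,
				  struct alloc_v4 old, struct alloc_v4 new)
{
	u->buckets[old.data_type]--;
	u->buckets[new.data_type]++;
	u->sectors[old.data_type] -= old.dirty_sectors;
	u->sectors[new.data_type] += new.dirty_sectors;
}

/*
 * After: const pointers share the caller's copies; callers that build
 * temporaries (as bch2_dev_usage_update_m() now does) pass &old_a, &new_a.
 */
static void usage_update(struct dev_usage *u,
			 const struct alloc_v4 *old, const struct alloc_v4 *new)
{
	u->buckets[old->data_type]--;
	u->buckets[new->data_type]++;
	u->sectors[old->data_type] -= old->dirty_sectors;
	u->sectors[new->data_type] += new->dirty_sectors;
}

int main(void)
{
	struct dev_usage u = { 0 };
	struct alloc_v4 old_a = { .data_type = 1, .dirty_sectors = 8 };
	struct alloc_v4 new_a = { .data_type = 2, .dirty_sectors = 8 };

	u.buckets[1] = 2;	/* two buckets currently of type 1 */
	u.sectors[1] = 16;

	usage_update_by_value(&u, old_a, new_a);	/* old convention */
	usage_update(&u, &old_a, &new_a);		/* new convention */
	return 0;
}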
@@ -302,6 +302,10 @@ u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
 struct bch_fs_usage_short
 bch2_fs_usage_read_short(struct bch_fs *);
 
+void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
+			   const struct bch_alloc_v4 *,
+			   const struct bch_alloc_v4 *, u64, bool);
+
 /* key/bucket marking: */
 
 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
@@ -327,8 +331,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 			      size_t, enum bch_data_type, unsigned,
 			      struct gc_pos, unsigned);
 
-int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
-		    struct bkey_s_c, struct bkey_s, unsigned);
 int bch2_mark_extent(struct btree_trans *, enum btree_id, unsigned,
 		     struct bkey_s_c, struct bkey_s, unsigned);
 int bch2_mark_stripe(struct btree_trans *, enum btree_id, unsigned,
......