Commit 8eb7f3ee authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: move dirty into bucket_mark

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 90541a74
@@ -185,9 +185,9 @@ static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
g->_mark.cached_sectors = get_alloc_field(a, &d, idx++);
}
static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g)
static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
struct bucket_mark m)
{
struct bucket_mark m = READ_ONCE(g->mark);
unsigned idx = 0;
void *d = a->v.data;
@@ -280,6 +280,8 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
__BKEY_PADDED(k, 8) alloc_key;
#endif
struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
struct bucket *g;
struct bucket_mark m;
int ret;
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
@@ -287,7 +289,10 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
a->k.p = POS(ca->dev_idx, b);
percpu_down_read(&c->usage_lock);
__alloc_write_key(a, bucket(ca, b));
g = bucket(ca, b);
m = bucket_cmpxchg(g, m, m.dirty = false);
__alloc_write_key(a, g, m);
percpu_up_read(&c->usage_lock);
bch2_btree_iter_cond_resched(iter);
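Note on the hunk above: the interesting line is `m = bucket_cmpxchg(g, m, m.dirty = false);`. With the dirty flag now inside bucket_mark, clearing it and snapshotting the mark is one atomic operation, and a bucket_set_dirty() that races in after the exchange leaves the bit set, so the bucket simply gets written out again on a later pass. The snapshot is then handed to __alloc_write_key() instead of letting it re-read g->mark, so the key is serialized from the same state the exchange saw. A minimal standalone sketch of the pattern follows; it illustrates the technique, not the kernel's bucket_cmpxchg() macro, and the names and bit position are invented for the example:

```c
/*
 * Illustration only, not the kernel's bucket_cmpxchg() macro. The point is
 * the technique: the mark, including the new dirty bit, packs into one
 * 64-bit word, so "clear dirty and snapshot the mark" is a single atomic
 * compare-and-exchange, with no window in which a concurrent re-dirty can
 * be lost.
 */
#include <stdatomic.h>
#include <stdint.h>

#define EXAMPLE_MARK_DIRTY	(1ULL << 12)	/* hypothetical bit position */

static uint64_t example_mark_clear_dirty(_Atomic uint64_t *mark)
{
	uint64_t old = atomic_load(mark);
	uint64_t new;

	do {
		new = old & ~EXAMPLE_MARK_DIRTY;
		/* on failure, old is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(mark, &old, new));

	return new;	/* the exact mark value that is now stored */
}
```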
@@ -350,19 +355,24 @@ int bch2_alloc_write(struct bch_fs *c)
for_each_rw_member(ca, c, i) {
struct btree_iter iter;
unsigned long bucket;
struct bucket_array *buckets;
size_t b;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
down_read(&ca->bucket_lock);
for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
ret = __bch2_alloc_write_key(c, ca, bucket,
&iter, NULL, 0);
buckets = bucket_array(ca);
for (b = buckets->first_bucket;
b < buckets->nbuckets;
b++) {
if (!buckets->b[b].mark.dirty)
continue;
ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL, 0);
if (ret)
break;
clear_bit(bucket, ca->buckets_dirty);
}
up_read(&ca->bucket_lock);
bch2_btree_iter_unlock(&iter);
@@ -541,6 +551,10 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
if (!is_available_bucket(mark))
return false;
if (ca->buckets_nouse &&
test_bit(bucket, ca->buckets_nouse))
return false;
gc_gen = bucket_gc_gen(ca, bucket);
if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
@@ -1340,6 +1354,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
m = READ_ONCE(buckets->b[bu].mark);
if (!buckets->b[bu].gen_valid ||
!test_bit(bu, ca->buckets_nouse) ||
!is_available_bucket(m) ||
m.cached_sectors)
continue;
@@ -1378,7 +1393,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
bch2_invalidate_one_bucket(c, ca, bu, &journal_seq);
fifo_push(&ca->free[RESERVE_BTREE], bu);
set_bit(bu, ca->buckets_dirty);
bucket_set_dirty(ca, bu);
}
}
@@ -395,7 +395,7 @@ struct bch_dev {
* Or rcu_read_lock(), but only for ptr_stale():
*/
struct bucket_array __rcu *buckets[2];
unsigned long *buckets_dirty;
unsigned long *buckets_nouse;
unsigned long *buckets_written;
/* most out of date gen in the btree */
u8 *oldest_gens;
@@ -150,7 +150,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
k.k->type, ptr->gen)) {
g->_mark.gen = ptr->gen;
g->gen_valid = 1;
set_bit(b, ca->buckets_dirty);
bucket_set_dirty(ca, b);
}
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
@@ -158,7 +158,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
k.k->type, ptr->gen, g->mark.gen)) {
g->_mark.gen = ptr->gen;
g->gen_valid = 1;
set_bit(b, ca->buckets_dirty);
bucket_set_dirty(ca, b);
set_bit(BCH_FS_FIXED_GENS, &c->flags);
}
}
@@ -1132,7 +1132,7 @@ static void buckets_free_rcu(struct rcu_head *rcu)
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
struct bucket_array *buckets = NULL, *old_buckets = NULL;
unsigned long *buckets_dirty = NULL;
unsigned long *buckets_nouse = NULL;
unsigned long *buckets_written = NULL;
u8 *oldest_gens = NULL;
alloc_fifo free[RESERVE_NR];
@@ -1162,7 +1162,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
GFP_KERNEL|__GFP_ZERO)) ||
!(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
GFP_KERNEL|__GFP_ZERO)) ||
!(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) *
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO)) ||
!(buckets_written = kvpmalloc(BITS_TO_LONGS(nbuckets) *
@@ -1199,8 +1199,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
memcpy(oldest_gens,
ca->oldest_gens,
n * sizeof(u8));
memcpy(buckets_dirty,
ca->buckets_dirty,
memcpy(buckets_nouse,
ca->buckets_nouse,
BITS_TO_LONGS(n) * sizeof(unsigned long));
memcpy(buckets_written,
ca->buckets_written,
@@ -1211,7 +1211,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
buckets = old_buckets;
swap(ca->oldest_gens, oldest_gens);
swap(ca->buckets_dirty, buckets_dirty);
swap(ca->buckets_nouse, buckets_nouse);
swap(ca->buckets_written, buckets_written);
if (resize)
@@ -1250,7 +1250,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
free_fifo(&free_inc);
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&free[i]);
kvpfree(buckets_dirty,
kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
kvpfree(buckets_written,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
@@ -1273,7 +1273,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
free_fifo(&ca->free[i]);
kvpfree(ca->buckets_written,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->buckets_dirty,
kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
@@ -57,6 +57,18 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
return __bucket(ca, b, false);
}
static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
{
struct bucket *g;
struct bucket_mark m;
rcu_read_lock();
g = bucket(ca, b);
bucket_cmpxchg(g, m, m.dirty = true);
rcu_read_unlock();
}
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
size_t b, int rw)
{
@@ -196,8 +208,7 @@ static inline bool is_available_bucket(struct bucket_mark mark)
{
return (!mark.owned_by_allocator &&
!mark.dirty_sectors &&
!mark.stripe &&
!mark.nouse);
!mark.stripe);
}
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
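Note on the hunk above: with nouse removed here, is_available_bucket() is a statement about the mark alone; the "never allocate from this bucket" state now lives in the per-device buckets_nouse bitmap, which bch2_can_invalidate_bucket() tests explicitly in the earlier hunk. A hedged sketch of that combined check, written as a helper whose name is hypothetical and not part of the patch:

```c
/*
 * Hypothetical helper, not in the patch: it only shows how the two checks
 * are split after this change. The mark carries per-bucket usage state
 * that is updated atomically; buckets_nouse is a separate per-device
 * bitmap that callers consult explicitly, as bch2_can_invalidate_bucket()
 * now does.
 */
static inline bool example_bucket_can_be_invalidated(struct bch_dev *ca,
						     size_t b,
						     struct bucket_mark m)
{
	if (!is_available_bucket(m))
		return false;

	if (ca->buckets_nouse && test_bit(b, ca->buckets_nouse))
		return false;

	return true;
}
```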
@@ -15,7 +15,7 @@ struct bucket_mark {
u8 gen;
u8 data_type:3,
owned_by_allocator:1,
nouse:1,
dirty:1,
journal_seq_valid:1,
stripe:1;
u16 dirty_sectors;
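Note on the hunk above: this layout change is what makes the new flag cheap. dirty sits in the same packed word as gen and the sector counts, so bucket_cmpxchg() can update it together with the rest of the mark in a single atomic operation. A sketch of that constraint follows, using only the fields visible in this commit's hunks (the real struct likely has more):

```c
/*
 * Sketch only: the field list is taken from this commit's hunks and may be
 * abridged. What matters is that the whole mark, dirty bit included, fits
 * in a single 64-bit word so it can be updated with one atomic cmpxchg.
 */
#include <stdint.h>

struct example_bucket_mark {
	union {
		uint64_t v;
		struct {
			uint8_t  gen;
			uint8_t  data_type:3,
				 owned_by_allocator:1,
				 dirty:1,		/* replaces the old nouse bit */
				 journal_seq_valid:1,
				 stripe:1;
			uint16_t dirty_sectors;
			uint16_t cached_sectors;
		};
	};
};

_Static_assert(sizeof(struct example_bucket_mark) == sizeof(uint64_t),
	       "the mark must pack into one word for atomic updates");
```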