Commit 3187aa8d authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Don't use BTREE_INSERT_USE_RESERVE so much

Previously, we were using BTREE_INSERT_USE_RESERVE in a lot of places where
it no longer makes sense.

 - we now have more open_buckets than we used to, and the reserves work
   better, so we no longer need BTREE_INSERT_USE_RESERVE just because
   we're holding open_buckets pinned.

 - we have the btree key cache for updates to the alloc btree, meaning
   we no longer need the btree reserve to ensure the allocator can make
   forward progress.

This means that we should only need a reserve for btree updates to
ensure that copygc can make forward progress.

Since the reserve is now needed only for copygc, we can also fold
RESERVE_BTREE into RESERVE_MOVINGGC (the allocator's freelist reserve).
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent f0e70018
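Condensed from the diff below, the post-change reserve policy in one sketch. The helper btree_node_reserve() is illustrative only and does not exist in the tree; the enum values and the flag's meaning are taken directly from this commit:

    /* After this commit, only two freelist reserves remain: */
    enum alloc_reserve {
            RESERVE_MOVINGGC        = 0,    /* copygc's reserve, now also backing btree node allocations */
            RESERVE_NONE            = 1,
            RESERVE_NR              = 2,
    };

    /*
     * Which reserve a btree node allocation draws from, per the
     * __bch2_btree_node_alloc() hunk below; use_reserve stands for
     * BTREE_INSERT_USE_RESERVE, which is now set only for copygc and
     * for merging btree nodes.
     */
    static inline enum alloc_reserve btree_node_reserve(bool use_reserve)
    {
            return use_reserve ? RESERVE_MOVINGGC : RESERVE_NONE;
    }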
@@ -319,9 +319,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
         bch2_trans_update(trans, iter, &a->k_i,
                           BTREE_TRIGGER_NORUN);
         ret = bch2_trans_commit(trans, NULL, NULL,
-                                BTREE_INSERT_NOFAIL|
-                                BTREE_INSERT_USE_RESERVE|
-                                flags);
+                                BTREE_INSERT_NOFAIL|flags);
 err:
         if (ret == -EINTR)
                 goto retry;
@@ -575,8 +573,7 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
                 if (available > fifo_free(&ca->free_inc) ||
                     (available &&
-                     (!fifo_full(&ca->free[RESERVE_BTREE]) ||
-                      !fifo_full(&ca->free[RESERVE_MOVINGGC]))))
+                     !fifo_full(&ca->free[RESERVE_MOVINGGC])))
                         break;
 
                 up_read(&c->gc_lock);
@@ -977,8 +974,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
                                   BTREE_INSERT_NOUNLOCK|
                                   BTREE_INSERT_NOCHECK_RW|
                                   BTREE_INSERT_NOFAIL|
-                                  BTREE_INSERT_USE_RESERVE|
-                                  BTREE_INSERT_USE_ALLOC_RESERVE|
+                                  BTREE_INSERT_JOURNAL_RESERVED|
                                   flags);
                 if (ret == -EINTR)
                         goto retry;
@@ -204,10 +204,8 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
         switch (reserve) {
-        case RESERVE_ALLOC:
+        case RESERVE_MOVINGGC:
                 return 0;
-        case RESERVE_BTREE:
-                return OPEN_BUCKETS_COUNT / 4;
         default:
                 return OPEN_BUCKETS_COUNT / 2;
         }
@@ -263,16 +261,6 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                         goto out;
 
         switch (reserve) {
-        case RESERVE_ALLOC:
-                if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                        goto out;
-                break;
-        case RESERVE_BTREE:
-                if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
-                    ca->free[RESERVE_BTREE].size &&
-                    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                        goto out;
-                break;
         case RESERVE_MOVINGGC:
                 if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
                         goto out;
@@ -37,11 +37,9 @@ struct bucket_clock {
 /* There is one reserve for each type of btree, one for prios and gens
  * and one for moving GC */
 enum alloc_reserve {
-        RESERVE_ALLOC           = -1,
-        RESERVE_BTREE           = 0,
-        RESERVE_MOVINGGC        = 1,
-        RESERVE_NONE            = 2,
-        RESERVE_NR              = 3,
+        RESERVE_MOVINGGC        = 0,
+        RESERVE_NONE            = 1,
+        RESERVE_NR              = 2,
 };
 
 typedef FIFO(long) alloc_fifo;
@@ -233,7 +233,6 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
                 if (max_stale > 64)
                         bch2_btree_node_rewrite(c, iter,
                                         b->data->keys.seq,
-                                        BTREE_INSERT_USE_RESERVE|
                                         BTREE_INSERT_NOWAIT|
                                         BTREE_INSERT_GC_LOCK_HELD);
                 else if (!bch2_btree_gc_rewrite_disabled &&
@@ -350,8 +350,6 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
                                   BTREE_INSERT_NOUNLOCK|
                                   BTREE_INSERT_NOCHECK_RW|
                                   BTREE_INSERT_NOFAIL|
-                                  BTREE_INSERT_USE_RESERVE|
-                                  BTREE_INSERT_USE_ALLOC_RESERVE|
                                   BTREE_INSERT_JOURNAL_RESERVED|
                                   BTREE_INSERT_JOURNAL_RECLAIM);
 err:
@@ -20,7 +20,6 @@ enum btree_insert_flags {
         __BTREE_INSERT_NOCHECK_RW,
         __BTREE_INSERT_LAZY_RW,
         __BTREE_INSERT_USE_RESERVE,
-        __BTREE_INSERT_USE_ALLOC_RESERVE,
         __BTREE_INSERT_JOURNAL_REPLAY,
         __BTREE_INSERT_JOURNAL_RESERVED,
         __BTREE_INSERT_JOURNAL_RECLAIM,
@@ -43,7 +42,6 @@ enum btree_insert_flags {
 /* for copygc, or when merging btree nodes */
 #define BTREE_INSERT_USE_RESERVE        (1 << __BTREE_INSERT_USE_RESERVE)
-#define BTREE_INSERT_USE_ALLOC_RESERVE  (1 << __BTREE_INSERT_USE_ALLOC_RESERVE)
 
 /* Insert is for journal replay - don't get journal reservations: */
 #define BTREE_INSERT_JOURNAL_REPLAY     (1 << __BTREE_INSERT_JOURNAL_REPLAY)
@@ -201,12 +201,9 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
         unsigned nr_reserve;
         enum alloc_reserve alloc_reserve;
 
-        if (flags & BTREE_INSERT_USE_ALLOC_RESERVE) {
+        if (flags & BTREE_INSERT_USE_RESERVE) {
                 nr_reserve      = 0;
-                alloc_reserve   = RESERVE_ALLOC;
-        } else if (flags & BTREE_INSERT_USE_RESERVE) {
-                nr_reserve      = BTREE_NODE_RESERVE / 2;
-                alloc_reserve   = RESERVE_BTREE;
+                alloc_reserve   = RESERVE_MOVINGGC;
         } else {
                 nr_reserve      = BTREE_NODE_RESERVE;
                 alloc_reserve   = RESERVE_NONE;
@@ -577,8 +574,6 @@ static void btree_update_nodes_written(struct btree_update *as)
         bch2_trans_init(&trans, c, 0, 512);
         ret = __bch2_trans_do(&trans, &as->disk_res, &journal_seq,
                               BTREE_INSERT_NOFAIL|
-                              BTREE_INSERT_USE_RESERVE|
-                              BTREE_INSERT_USE_ALLOC_RESERVE|
                               BTREE_INSERT_NOCHECK_RW|
                               BTREE_INSERT_JOURNAL_RECLAIM|
                               BTREE_INSERT_JOURNAL_RESERVED,
@@ -1457,15 +1452,6 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
         struct btree_update *as;
         struct closure cl;
         int ret = 0;
-        struct btree_insert_entry *i;
-
-        /*
-         * We already have a disk reservation and open buckets pinned; this
-         * allocation must not block:
-         */
-        trans_for_each_update(trans, i)
-                if (btree_node_type_needs_gc(i->iter->btree_id))
-                        flags |= BTREE_INSERT_USE_RESERVE;
 
         closure_init_stack(&cl);
@@ -1926,10 +1912,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 retry:
         as = bch2_btree_update_start(iter->trans, iter->btree_id,
                 parent ? btree_update_reserve_required(c, parent) : 0,
-                BTREE_INSERT_NOFAIL|
-                BTREE_INSERT_USE_RESERVE|
-                BTREE_INSERT_USE_ALLOC_RESERVE,
-                &cl);
+                BTREE_INSERT_NOFAIL, &cl);
         if (IS_ERR(as)) {
                 ret = PTR_ERR(as);
@@ -1084,8 +1084,7 @@ int bch2_btree_delete_at(struct btree_trans *trans,
         bch2_trans_update(trans, iter, &k, 0);
         return bch2_trans_commit(trans, NULL, NULL,
-                                 BTREE_INSERT_NOFAIL|
-                                 BTREE_INSERT_USE_RESERVE|flags);
+                                 BTREE_INSERT_NOFAIL|flags);
 }
 
 int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
@@ -2186,7 +2186,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
                       ca->mi.bucket_size / c->opts.btree_node_size);
         /* XXX: these should be tunable */
         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
-        size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
+        size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 6);
         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
                                       btree_reserve * 2);
         bool resize = ca->buckets[0] != NULL;
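(A worked example of the sizing change above, assuming a device with 2^20 buckets: copygc_reserve grows from 2^20 >> 7 = 8192 buckets to 2^20 >> 6 = 16384. The reserve roughly doubles because btree node allocations now come out of it as well; the separate RESERVE_BTREE fifo it absorbs is removed in the next hunk.)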
@@ -2203,7 +2203,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                             sizeof(unsigned long),
                                             GFP_KERNEL|__GFP_ZERO)) ||
-            !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
             !init_fifo(&free[RESERVE_MOVINGGC],
                        copygc_reserve, GFP_KERNEL) ||
             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
@@ -800,8 +800,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
                 bch2_trans_update(&trans, iter, sk.k, 0);
 
                 ret = bch2_trans_commit(&trans, NULL, NULL,
-                                        BTREE_INSERT_NOFAIL|
-                                        BTREE_INSERT_USE_RESERVE);
+                                        BTREE_INSERT_NOFAIL);
                 if (ret == -EINTR)
                         ret = 0;
                 if (ret)
@@ -330,8 +330,7 @@ int bch2_extent_update(struct btree_trans *trans,
         ret = bch2_trans_commit(trans, disk_res, journal_seq,
                                 BTREE_INSERT_NOCHECK_RW|
-                                BTREE_INSERT_NOFAIL|
-                                BTREE_INSERT_USE_RESERVE);
+                                BTREE_INSERT_NOFAIL);
         if (ret)
                 return ret;
@@ -776,7 +776,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                 }
         } else {
                 rcu_read_lock();
-                ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
+                ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
                                        false, cl);
                 rcu_read_unlock();
                 if (IS_ERR(ob)) {
@@ -167,7 +167,6 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                 ret = bch2_trans_commit(&trans, &op->res,
                                 op_journal_seq(op),
                                 BTREE_INSERT_NOFAIL|
-                                BTREE_INSERT_USE_RESERVE|
                                 m->data_opts.btree_insert_flags);
                 if (!ret)
                         atomic_long_inc(&c->extent_migrate_done);
@@ -200,6 +200,11 @@ static int bch2_copygc(struct bch_fs *c)
                 return -1;
         }
 
+        /*
+         * Our btree node allocations also come out of RESERVE_MOVINGGC:
+         */
+        sectors_to_move = (sectors_to_move * 3) / 4;
+
         for (i = h->data; i < h->data + h->used; i++)
                 sectors_to_move += i->sectors * i->replicas;
@@ -798,7 +798,6 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
         pr_buf(out,
                 "free_inc:               %zu/%zu\n"
-                "free[RESERVE_BTREE]:    %zu/%zu\n"
                 "free[RESERVE_MOVINGGC]: %zu/%zu\n"
                 "free[RESERVE_NONE]:     %zu/%zu\n"
                 "buckets:\n"
@@ -826,7 +825,6 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
                 "open_buckets_user:      %u\n"
                 "btree reserve cache:    %u\n",
                 fifo_used(&ca->free_inc),               ca->free_inc.size,
-                fifo_used(&ca->free[RESERVE_BTREE]),    ca->free[RESERVE_BTREE].size,
                 fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
                 fifo_used(&ca->free[RESERVE_NONE]),     ca->free[RESERVE_NONE].size,
                 ca->mi.nbuckets - ca->mi.first_bucket,