Commit 1306f87d authored by Kent Overstreet

bcachefs: Plumb btree_trans through btree cache code

Soon, __bch2_btree_node_write() is going to require a btree_trans: zoned
device support is going to require a new allocation for every btree node
write. This is a bit of prep work.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b1cfe5ed
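
The conversion follows one pattern throughout: a helper that used to take a struct bch_fs * now takes a struct btree_trans * and recovers the filesystem pointer from trans->c. A minimal sketch of that convention, where example_node_op() is a hypothetical caller rather than anything added by this commit:

/*
 * Hypothetical example (not part of this commit): the callee receives the
 * transaction and recovers the filesystem pointer from trans->c instead of
 * taking a separate bch_fs argument.
 */
static struct btree *example_node_op(struct btree_trans *trans, unsigned level)
{
	struct bch_fs *c = trans->c;	/* filesystem pointer now comes from the transaction */
	struct btree *b;

	/* the converted allocator takes the transaction directly: */
	b = bch2_btree_node_mem_alloc(trans, level != 0);
	if (IS_ERR(b))
		bch_err(c, "example: btree node allocation failed: %s",
			bch2_err_str(PTR_ERR(b)));
	return b;
}

Callers that already run inside a transaction simply forward their trans; the few entry points that do not (bch2_btree_root_read() and bch2_btree_root_alloc() below) grow a bch2_trans_run() wrapper instead.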
@@ -561,8 +561,9 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
 	}
 }
 
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct list_head *freed = pcpu_read_locks
 		? &bc->freed_pcpu
@@ -673,8 +674,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
-				struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 				struct btree_path *path,
 				const struct bkey_i *k,
 				enum btree_id btree_id,
@@ -682,6 +682,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 				enum six_lock_type lock_type,
 				bool sync)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	u32 seq;
@@ -691,14 +692,14 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
 	 */
-	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+	if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
 		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
 
-	b = bch2_btree_node_mem_alloc(c, level != 0);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-	if (trans && b == ERR_PTR(-ENOMEM)) {
+	if (b == ERR_PTR(-ENOMEM)) {
 		trans->memory_allocation_failure = true;
 		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
@@ -744,7 +745,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (!sync)
 		return NULL;
 
-	if (trans) {
+	if (path) {
 		int ret = bch2_trans_relock(trans) ?:
 			  bch2_btree_path_relock_intent(trans, path);
 		if (ret) {
@@ -754,7 +755,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-		if (trans)
+		if (path)
 			trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
@@ -820,7 +821,7 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
 		 * else we could read in a btree node from disk that's been
 		 * freed:
 		 */
-		b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+		b = bch2_btree_node_fill(trans, path, k, path->btree_id,
 					 level, lock_type, true);
 
 		/* We raced and found the btree node in the cache */
@@ -1029,7 +1030,7 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
 		if (nofill)
 			goto out;
 
-		b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+		b = bch2_btree_node_fill(trans, NULL, k, btree_id,
 					 level, SIX_LOCK_read, true);
 
 		/* We raced and found the btree node in the cache */
@@ -1089,12 +1090,12 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
 	return b;
 }
 
-int bch2_btree_node_prefetch(struct bch_fs *c,
-			     struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
 			     struct btree_path *path,
 			     const struct bkey_i *k,
 			     enum btree_id btree_id, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 
@@ -1105,7 +1106,7 @@ int bch2_btree_node_prefetch(struct bch_fs *c,
 	if (b)
 		return 0;
 
-	b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+	b = bch2_btree_node_fill(trans, path, k, btree_id,
 				 level, SIX_LOCK_read, false);
 	return PTR_ERR_OR_ZERO(b);
 }
......
@@ -21,7 +21,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
 int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
 
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *, bool);
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
 
 struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
 				  const struct bkey_i *, unsigned,
@@ -30,7 +30,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
 struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
 					 enum btree_id, unsigned, bool);
 
-int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *, struct btree_path *,
+int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
 			     const struct bkey_i *, enum btree_id, unsigned);
 
 void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);
......
@@ -1610,9 +1610,10 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	}
 }
 
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
 			 const struct bkey_i *k, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
@@ -1624,7 +1625,7 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c, level != 0);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
 	bch2_btree_cache_cannibalize_unlock(c);
 
 	BUG_ON(IS_ERR(b));
@@ -1655,6 +1656,13 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 	return ret;
 }
 
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+			 const struct bkey_i *k, unsigned level)
+{
+	return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
+}
+
 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 			       struct btree_write *w)
 {
......
@@ -815,7 +815,7 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
 			break;
 
 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
-		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
 					       path->level - 1);
 	}
 
@@ -850,7 +850,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
 			break;
 
 		bch2_bkey_buf_reassemble(&tmp, c, k);
-		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
 					       path->level - 1);
 	}
 
......
@@ -300,7 +300,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 	bch2_open_bucket_get(c, wp, &ob);
 	bch2_alloc_sectors_done(c, wp);
 mem_alloc:
-	b = bch2_btree_node_mem_alloc(c, interior_node);
+	b = bch2_btree_node_mem_alloc(trans, interior_node);
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
 
@@ -2261,7 +2261,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 			return ret;
 		}
 
-		new_hash = bch2_btree_node_mem_alloc(c, false);
+		new_hash = bch2_btree_node_mem_alloc(trans, false);
 	}
 
 	path->intent_ref++;
@@ -2324,8 +2324,9 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
 	bch2_btree_set_root_inmem(c, b);
 }
 
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
@@ -2337,7 +2338,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c, false);
+	b = bch2_btree_node_mem_alloc(trans, false);
 	bch2_btree_cache_cannibalize_unlock(c);
 
 	set_btree_node_fake(b);
@@ -2366,6 +2367,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
+	return 0;
+}
+
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+	bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id));
 }
 
 void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
......
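
For bch2_btree_root_read() and bch2_btree_root_alloc() the external signature is unchanged; each gets a __-prefixed body that takes the transaction plus a thin wrapper that spins one up via bch2_trans_run(). Assuming bch2_trans_run() in this tree still expands to a stack-allocated transaction bracketed by bch2_trans_init()/bch2_trans_exit() (which the &trans argument above suggests), the read-side wrapper is roughly equivalent to this open-coded sketch:

/* Rough open-coded equivalent of the bch2_trans_run() wrapper added above (sketch only). */
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);	/* set up a transaction for the duration of the call */
	ret = __bch2_btree_root_read(&trans, id, k, level);
	bch2_trans_exit(&trans);		/* tear it back down */

	return ret;
}

bch2_btree_root_alloc() follows the same shape and simply discards the int returned by __bch2_btree_root_alloc().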