Commit c30402e5 authored by Kent Overstreet

bcachefs: btree_path_cached_set()

New helper that sets up a btree_path's level 0 from a bkey_cached - small refactoring.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 71fdc0b5
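For readers skimming the diff below: the new helper gathers into one place the three assignments that each call site previously open-coded. The following is an annotated copy of the helper as added by this commit; the comments are editorial glosses on what each line does, based on how these fields are used elsewhere in bcachefs, and are not part of the patch.

static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
					 struct bkey_cached *ck,
					 enum btree_node_locked_type lock_held)
{
	/* Record the six lock's sequence number so a later relock attempt
	 * can detect whether the lock was dropped and retaken in between: */
	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);

	/* The cached key stands in for the level-0 btree node on this path: */
	path->l[0].b		= (void *) ck;

	/* Note in the path's lock state which lock type the caller already holds: */
	mark_btree_node_locked(trans, path, 0, lock_held);
}

All three converted call sites pass the lock type they already hold - BTREE_NODE_INTENT_LOCKED in bkey_cached_alloc() and in the slowpath, the computed lock_want elsewhere - so the helper itself takes no locks.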
@@ -39,6 +39,15 @@ static const struct rhashtable_params bch2_btree_key_cache_params = {
 	.automatic_shrinking	= true,
 };
 
+static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
+					 struct bkey_cached *ck,
+					 enum btree_node_locked_type lock_held)
+{
+	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
+	path->l[0].b		= (void *) ck;
+	mark_btree_node_locked(trans, path, 0, lock_held);
+}
+
 __flatten
 inline struct bkey_cached *
 bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
@@ -259,9 +268,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 		return ERR_PTR(ret);
 	}
 
-	path->l[0].b		= (void *) ck;
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+	btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
 
 	ret = bch2_btree_node_lock_write(trans, path, &ck->c);
 	if (unlikely(ret)) {
@@ -489,7 +496,7 @@ bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree
 		if (!ck)
 			goto retry;
 
-		mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+		btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
 		path->locks_want = 1;
 	} else {
 		enum six_lock_type lock_want = __btree_lock_want(path, 0);
@@ -507,12 +514,8 @@ bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree
 			goto retry;
 		}
 
-		mark_btree_node_locked(trans, path, 0,
-				       (enum btree_node_locked_type) lock_want);
+		btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
 	}
 
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	path->l[0].b		= (void *) ck;
-
 fill:
 	path->uptodate = BTREE_ITER_UPTODATE;
@@ -559,30 +562,25 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 	}
 retry:
 	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
-	if (!ck) {
+	if (!ck)
 		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
-	} else {
-		enum six_lock_type lock_want = __btree_lock_want(path, 0);
+
+	enum six_lock_type lock_want = __btree_lock_want(path, 0);
 
-		ret = btree_node_lock(trans, path, (void *) ck, 0,
-				      lock_want, _THIS_IP_);
-		EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
-
-		if (ret)
-			return ret;
+	ret = btree_node_lock(trans, path, (void *) ck, 0,
+			      lock_want, _THIS_IP_);
+	EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
+	if (ret)
+		return ret;
 
-		if (ck->key.btree_id != path->btree_id ||
-		    !bpos_eq(ck->key.pos, path->pos)) {
-			six_unlock_type(&ck->c.lock, lock_want);
-			goto retry;
-		}
+	if (ck->key.btree_id != path->btree_id ||
+	    !bpos_eq(ck->key.pos, path->pos)) {
+		six_unlock_type(&ck->c.lock, lock_want);
+		goto retry;
+	}
 
-		mark_btree_node_locked(trans, path, 0,
-				       (enum btree_node_locked_type) lock_want);
-	}
-
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	path->l[0].b		= (void *) ck;
+	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
 fill:
 	if (!ck->valid)
 		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
...