Commit e9174370 authored by Kent Overstreet

bcachefs: bch2_btree_node_relock_notrace()

Most of the node_relock_fail trace events are generated from
bch2_btree_path_verify_level() when debug_check_iterators is enabled -
but we're not interested in these trace events; they don't indicate that
we're in a slowpath.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c36ff038
@@ -167,7 +167,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
 	if (!btree_path_node(path, level))
 		return;
 
-	if (!bch2_btree_node_relock(trans, path, level))
+	if (!bch2_btree_node_relock_notrace(trans, path, level))
 		return;
 
 	BUG_ON(!btree_path_pos_in_node(path, l->b));
...
@@ -401,7 +401,8 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
-			      struct btree_path *path, unsigned level)
+			      struct btree_path *path, unsigned level,
+			      bool trace)
 {
 	struct btree *b = btree_path_node(path, level);
 	int want = __btree_lock_want(path, level);
@@ -416,6 +417,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 		return true;
 	}
 fail:
-	trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
+	if (trace)
+		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 	return false;
 }
...
@@ -317,7 +317,7 @@ static inline int bch2_btree_path_relock(struct btree_trans *trans,
 		: __bch2_btree_path_relock(trans, path, trace_ip);
 }
 
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
 
 static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 					  struct btree_path *path, unsigned level)
@@ -328,7 +328,19 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 	return likely(btree_node_locked(path, level)) ||
 		(!IS_ERR_OR_NULL(path->l[level].b) &&
-		 __bch2_btree_node_relock(trans, path, level));
+		 __bch2_btree_node_relock(trans, path, level, true));
+}
+
+static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
+						  struct btree_path *path, unsigned level)
+{
+	EBUG_ON(btree_node_locked(path, level) &&
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+	return likely(btree_node_locked(path, level)) ||
+		(!IS_ERR_OR_NULL(path->l[level].b) &&
+		 __bch2_btree_node_relock(trans, path, level, false));
 }
 
 /* upgrade */
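The patch follows a common pattern: thread a bool flag through the shared out-of-line worker and gate the tracepoint on it, so that debug-only callers can reuse the relock logic without flooding the trace buffer. A minimal standalone sketch of that pattern - plain C with illustrative names, and printf() standing in for trace_and_count(); this is not the bcachefs code itself:

#include <stdbool.h>
#include <stdio.h>

/* Shared worker: attempts the "relock" and reports a failure only when asked. */
static bool __relock(int level, bool trace)
{
	bool ok = false;	/* pretend the relock attempt failed */

	if (!ok && trace)
		printf("relock_fail: level %d\n", level);
	return ok;
}

/* Normal entry point: a failure here means a real slowpath, so trace it. */
static bool relock(int level)
{
	return __relock(level, true);
}

/* Verify/debug entry point: failures here are expected noise, so stay silent. */
static bool relock_notrace(int level)
{
	return __relock(level, false);
}

int main(void)
{
	relock(1);		/* emits the trace message */
	relock_notrace(1);	/* silent, but runs the same relock logic */
	return 0;
}

Passing the flag into the out-of-line worker, rather than duplicating the worker, keeps the inline wrappers' fast path unchanged: the trace branch is only evaluated on the failure path, where the relock has already been lost.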