Commit 49e401fa authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Tracepoint improvements

 - use strlcpy(), not strncpy()
 - add tracepoints for btree_path alloc and free
 - give the tracepoint for key cache upgrade fail a proper name
 - add a tracepoint for btree_node_upgrade_fail
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 17047fbc
@@ -132,15 +132,37 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
 	bch2_btree_node_unlock_write_inlined(trans, path, b);
 }
 
-void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+static struct six_lock_count btree_node_lock_counts(struct btree_trans *trans,
+						    struct btree_path *skip,
+						    struct btree *b,
+						    unsigned level)
 {
-	struct btree_path *linked;
-	unsigned readers = 0;
+	struct btree_path *path;
+	struct six_lock_count ret = { 0, 0 };
+
+	if ((unsigned long) b < 128)
+		return ret;
 
-	trans_for_each_path(trans, linked)
-		if (linked->l[b->c.level].b == b &&
-		    btree_node_read_locked(linked, b->c.level))
-			readers++;
+	trans_for_each_path(trans, path)
+		if (path != skip && path->l[level].b == b) {
+			ret.read += btree_node_read_locked(path, level);
+			ret.intent += btree_node_intent_locked(path, level);
+		}
+
+	return ret;
+}
+
+static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+{
+	if (!lock->readers)
+		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
+	else
+		this_cpu_add(*lock->readers, nr);
+}
+
+void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+{
+	int readers = btree_node_lock_counts(trans, NULL, b, b->c.level).read;
 
 	/*
 	 * Must drop our read locks before calling six_lock_write() -
@@ -148,19 +170,9 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 	 * goes to 0, and it's safe because we have the node intent
 	 * locked:
 	 */
-	if (!b->c.lock.readers)
-		atomic64_sub(__SIX_VAL(read_lock, readers),
-			     &b->c.lock.state.counter);
-	else
-		this_cpu_sub(*b->c.lock.readers, readers);
-
+	six_lock_readers_add(&b->c.lock, -readers);
 	six_lock_write(&b->c.lock, NULL, NULL);
-
-	if (!b->c.lock.readers)
-		atomic64_add(__SIX_VAL(read_lock, readers),
-			     &b->c.lock.state.counter);
-	else
-		this_cpu_add(*b->c.lock.readers, readers);
+	six_lock_readers_add(&b->c.lock, readers);
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
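Taken together, the two hunks above leave __bch2_btree_node_lock_write() reading as follows. This is assembled from the diff itself, with the lock-ordering comment paraphrased where the hunk boundary elides a line:

	void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
	{
		/* this transaction's read holds on the node's lock */
		int readers = btree_node_lock_counts(trans, NULL, b, b->c.level).read;

		/*
		 * Must drop our read locks before calling six_lock_write() -
		 * the write lock is only granted once the reader count goes
		 * to 0, and it's safe because we have the node intent locked:
		 */
		six_lock_readers_add(&b->c.lock, -readers);
		six_lock_write(&b->c.lock, NULL, NULL);
		six_lock_readers_add(&b->c.lock, readers);
	}

Factoring the reader-count adjustment into six_lock_readers_add() replaces the two hand-rolled atomic64 vs. per-CPU branches that previously surrounded the six_lock_write() call.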
@@ -229,6 +241,12 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 		goto success;
 	}
 
+	trace_btree_node_upgrade_fail(trans->fn, _RET_IP_,
+				      path->btree_id,
+				      &path->pos,
+				      btree_node_locked(path, level),
+				      btree_node_lock_counts(trans, NULL, b, level),
+				      six_lock_counts(&b->c.lock));
 	return false;
 success:
 	mark_btree_node_intent_locked(trans, path, level);
@@ -1800,6 +1818,7 @@ static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btr
 
 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
 {
+	trace_btree_path_free(trans->fn, _RET_IP_, path->btree_id, &path->pos);
 	__bch2_btree_path_unlock(trans, path);
 	btree_path_list_remove(trans, path);
 	trans->paths_allocated &= ~(1ULL << path->idx);
@@ -1975,6 +1994,8 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 		__btree_path_get(path_pos, intent);
 		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
 	} else {
+		trace_btree_path_alloc(trans->fn, _RET_IP_, btree_id, &pos, locks_want);
+
 		path = btree_path_alloc(trans, path_pos);
 		path_pos = NULL;
@@ -414,9 +414,13 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_pat
 	path->l[0].b = (void *) ck;
 fill:
 	if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
+		/*
+		 * Using the underscore version because we haven't set
+		 * path->uptodate yet:
+		 */
 		if (!path->locks_want &&
 		    !__bch2_btree_path_upgrade(trans, path, 1)) {
-			trace_transaction_restart_ip(trans->fn, _THIS_IP_);
+			trace_transaction_restart_key_cache_upgrade(trans->fn, _THIS_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
 			goto err;
 		}
@@ -401,7 +401,7 @@ TRACE_EVENT(btree_node_relock_fail,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= btree_id;
 		__entry->pos_inode		= pos->inode;
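Context for the strncpy() to strlcpy() swap here and in the hunks below: trans_fn is stored in a fixed __array(char, trans_fn, 24), and strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer, so TP_printk()'s "%s" could read past the array for long function names. strlcpy() truncates and always terminates. A minimal userspace sketch of the difference (demo_strlcpy is a stand-in for the kernel helper, not part of this commit):

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the kernel's strlcpy(): copy at most size - 1 bytes,
	 * always NUL-terminate, return the full source length. */
	static size_t demo_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char buf[8];
		const char *fn = "bch2_btree_path_traverse"; /* longer than buf */

		strncpy(buf, fn, sizeof(buf));
		/* buf now holds "bch2_btr" with no terminating NUL; printing
		 * it as a string would run off the end of the array. */

		demo_strlcpy(buf, fn, sizeof(buf));
		printf("%s\n", buf); /* safely truncated: "bch2_bt" */
		return 0;
	}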
@@ -424,6 +424,59 @@ TRACE_EVENT(btree_node_relock_fail,
 		  __entry->node_lock_seq)
 );
 
+TRACE_EVENT(btree_node_upgrade_fail,
+	TP_PROTO(const char *trans_fn,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos,
+		 bool locked,
+		 struct six_lock_count self_lock_count,
+		 struct six_lock_count lock_count),
+	TP_ARGS(trans_fn, caller_ip, btree_id, pos,
+		locked, self_lock_count, lock_count),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 24	)
+		__field(unsigned long,		caller_ip	)
+		__field(u8,			btree_id	)
+		__field(u64,			pos_inode	)
+		__field(u64,			pos_offset	)
+		__field(u32,			pos_snapshot	)
+		__field(u8,			locked		)
+		__field(u8,			self_read_count	)
+		__field(u8,			read_count	)
+		__field(u8,			self_intent_count)
+		__field(u8,			intent_count	)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		__entry->caller_ip		= caller_ip;
+		__entry->btree_id		= btree_id;
+		__entry->pos_inode		= pos->inode;
+		__entry->pos_offset		= pos->offset;
+		__entry->pos_snapshot		= pos->snapshot;
+		__entry->locked			= locked;
+		__entry->self_read_count	= self_lock_count.read;
+		__entry->self_intent_count	= self_lock_count.intent;
+		__entry->read_count		= lock_count.read;
+		__entry->intent_count		= lock_count.intent;
+	),
+
+	TP_printk("%s %pS btree %u pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u",
+		  __entry->trans_fn,
+		  (void *) __entry->caller_ip,
+		  __entry->btree_id,
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->locked,
+		  __entry->self_read_count,
+		  __entry->self_intent_count,
+		  __entry->read_count,
+		  __entry->intent_count)
+);
+
 /* Garbage collection */
 
 DEFINE_EVENT(bch_fs, gc_gens_start,
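The new event records two struct six_lock_count values: self_lock_count, from btree_node_lock_counts() (this transaction's own holds, per the first hunk), and lock_count, from six_lock_counts() (presumably the totals across all holders of the node's lock). The type's definition isn't shown in this diff; judging from the { 0, 0 } initializer and the .read/.intent accessors above, it is presumably along these lines:

	/* Inferred from usage in this diff, not the verbatim definition. */
	struct six_lock_count {
		unsigned	read;
		unsigned	intent;
	};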
@@ -688,7 +741,7 @@ DECLARE_EVENT_CLASS(transaction_event,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 	),
@@ -701,12 +754,6 @@ DEFINE_EVENT(transaction_event, transaction_commit,
 	TP_ARGS(trans_fn, caller_ip)
 );
 
-DEFINE_EVENT(transaction_event, transaction_restart_ip,
-	TP_PROTO(const char *trans_fn,
-		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
-);
-
 DEFINE_EVENT(transaction_event, transaction_restart_injected,
 	TP_PROTO(const char *trans_fn,
 		 unsigned long caller_ip),
@@ -784,7 +831,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= btree_id;
 		__entry->pos_inode		= pos->inode;
@@ -865,6 +912,12 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
 	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
 );
 
+DEFINE_EVENT(transaction_event, transaction_restart_key_cache_upgrade,
+	TP_PROTO(const char *trans_fn,
+		 unsigned long caller_ip),
+	TP_ARGS(trans_fn, caller_ip)
+);
+
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
 	TP_PROTO(const char *trans_fn,
 		 unsigned long caller_ip,
@@ -939,7 +992,7 @@ TRACE_EVENT(trans_restart_would_deadlock,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->in_traverse_all	= in_traverse_all;
 		__entry->reason			= reason;
@@ -983,7 +1036,7 @@ TRACE_EVENT(trans_restart_would_deadlock_write,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 	),
 
 	TP_printk("%s", __entry->trans_fn)
@@ -1002,7 +1055,7 @@ TRACE_EVENT(trans_restart_mem_realloced,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip	= caller_ip;
 		__entry->bytes		= bytes;
 	),
@@ -1034,7 +1087,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= btree_id;
 		__entry->inode			= pos->inode;
@@ -1055,6 +1108,52 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
 		  __entry->new_u64s)
 );
 
+TRACE_EVENT(btree_path_alloc,
+	TP_PROTO(const char *trans_fn,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos,
+		 unsigned locks_want),
+	TP_ARGS(trans_fn, caller_ip, btree_id, pos, locks_want),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 24	)
+		__field(unsigned long,		caller_ip	)
+		__field(u8,			btree_id	)
+		__field(u8,			locks_want	)
+		__field(u64,			pos_inode	)
+		__field(u64,			pos_offset	)
+		__field(u32,			pos_snapshot	)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		__entry->caller_ip		= caller_ip;
+		__entry->btree_id		= btree_id;
+		__entry->locks_want		= locks_want;
+		__entry->pos_inode		= pos->inode;
+		__entry->pos_offset		= pos->offset;
+		__entry->pos_snapshot		= pos->snapshot;
+	),
+
+	TP_printk("%s %pS btree %u locks_want %u pos %llu:%llu:%u",
+		  __entry->trans_fn,
+		  (void *) __entry->caller_ip,
+		  __entry->btree_id,
+		  __entry->locks_want,
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(transaction_restart_iter, btree_path_free,
+	TP_PROTO(const char *trans_fn,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos),
+	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+);
+
 #endif /* _TRACE_BCACHEFS_H */
 
 /* This part must be outside protection */