Commit 52483bc2 authored by Qu Wenruo's avatar Qu Wenruo Committed by Josef Bacik

btrfs: Add ftrace for btrfs_workqueue

Add ftrace for btrfs_workqueue for further workqueue tuning.
This patch needs to be applied after the workqueue replace patchset.
Signed-off-by: default avatarQu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: default avatarJosef Bacik <jbacik@fb.com>
parent 6db8914f
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include "async-thread.h" #include "async-thread.h"
#include "ctree.h"
#define WORK_DONE_BIT 0 #define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1 #define WORK_ORDER_DONE_BIT 1
...@@ -210,6 +211,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) ...@@ -210,6 +211,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
*/ */
if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
break; break;
trace_btrfs_ordered_sched(work);
spin_unlock_irqrestore(lock, flags); spin_unlock_irqrestore(lock, flags);
work->ordered_func(work); work->ordered_func(work);
...@@ -223,6 +225,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) ...@@ -223,6 +225,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
* with the lock held though * with the lock held though
*/ */
work->ordered_free(work); work->ordered_free(work);
trace_btrfs_all_work_done(work);
} }
spin_unlock_irqrestore(lock, flags); spin_unlock_irqrestore(lock, flags);
} }
...@@ -246,12 +249,15 @@ static void normal_work_helper(struct work_struct *arg) ...@@ -246,12 +249,15 @@ static void normal_work_helper(struct work_struct *arg)
need_order = 1; need_order = 1;
wq = work->wq; wq = work->wq;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq); thresh_exec_hook(wq);
work->func(work); work->func(work);
if (need_order) { if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags); set_bit(WORK_DONE_BIT, &work->flags);
run_ordered_work(wq); run_ordered_work(wq);
} }
if (!need_order)
trace_btrfs_all_work_done(work);
} }
void btrfs_init_work(struct btrfs_work *work, void btrfs_init_work(struct btrfs_work *work,
...@@ -280,6 +286,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, ...@@ -280,6 +286,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
spin_unlock_irqrestore(&wq->list_lock, flags); spin_unlock_irqrestore(&wq->list_lock, flags);
} }
queue_work(wq->normal_wq, &work->normal_work); queue_work(wq->normal_wq, &work->normal_work);
trace_btrfs_work_queued(work);
} }
void btrfs_queue_work(struct btrfs_workqueue *wq, void btrfs_queue_work(struct btrfs_workqueue *wq,
......
...@@ -21,6 +21,7 @@ struct btrfs_block_group_cache; ...@@ -21,6 +21,7 @@ struct btrfs_block_group_cache;
struct btrfs_free_cluster; struct btrfs_free_cluster;
struct map_lookup; struct map_lookup;
struct extent_buffer; struct extent_buffer;
struct btrfs_work;
#define show_ref_type(type) \ #define show_ref_type(type) \
__print_symbolic(type, \ __print_symbolic(type, \
...@@ -982,6 +983,87 @@ TRACE_EVENT(free_extent_state, ...@@ -982,6 +983,87 @@ TRACE_EVENT(free_extent_state,
(void *)__entry->ip) (void *)__entry->ip)
); );
/*
 * Event class for btrfs workqueue tracepoints that fire while the work
 * item is still live.  Records the work item, its owning workqueue and
 * its three callback pointers so a trace can correlate queueing,
 * scheduling and ordered execution of the same item.
 */
DECLARE_EVENT_CLASS(btrfs__work,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work),

TP_STRUCT__entry(
__field( void *, work )
__field( void *, wq )
__field( void *, func )
__field( void *, ordered_func )
__field( void *, ordered_free )
),

TP_fast_assign(
/* Pointers are captured by value only; nothing is dereferenced later. */
__entry->work = work;
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
__entry->ordered_free = work->ordered_free;
),

TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
__entry->work, __entry->wq, __entry->func,
__entry->ordered_func, __entry->ordered_free)
);
/* For situations where the work is freed */
DECLARE_EVENT_CLASS(btrfs__work__done,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work),

TP_STRUCT__entry(
__field( void *, work )
),

TP_fast_assign(
/*
 * Only the raw pointer value is stored: this event fires after
 * ordered_free may have run, so the item must not be dereferenced.
 */
__entry->work = work;
),

/* Use "work=%p" to match the key=value format of the btrfs__work class. */
TP_printk("work=%p", __entry->work)
);
/* Fired from __btrfs_queue_work() right after queue_work(). */
DEFINE_EVENT(btrfs__work, btrfs_work_queued,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work)
);
/* Fired at the start of normal_work_helper(), before work->func() runs. */
DEFINE_EVENT(btrfs__work, btrfs_work_sched,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work)
);
/* NOTE(review): no call site for this event is visible in this patch. */
DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work)
);
/*
 * Fired when a work item is fully finished: after ordered_free() in
 * run_ordered_work(), or at the end of normal_work_helper() for
 * non-ordered work.  Uses the "done" class since the item may be freed.
 */
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work)
);
/* Fired from run_ordered_work() just before work->ordered_func() runs. */
DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,

TP_PROTO(struct btrfs_work *work),

TP_ARGS(work)
);
#endif /* _TRACE_BTRFS_H */ #endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment