Commit c73464b1 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Fix trace_sched_switch()

__trace_sched_switch_state() is the last remaining PREEMPT_ACTIVE
user; move trace_sched_switch() from prepare_task_switch() to
__schedule() and propagate the @preempt argument.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fc13aeba
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
 	     TP_ARGS(p));
 
 #ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
-	long state = p->state;
-
-#ifdef CONFIG_PREEMPT
 #ifdef CONFIG_SCHED_DEBUG
 	BUG_ON(p != current);
 #endif /* CONFIG_SCHED_DEBUG */
 
 	/*
-	 * For all intents and purposes a preempted task is a running task.
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
 	 */
-	if (preempt_count() & PREEMPT_ACTIVE)
-		state = TASK_RUNNING | TASK_STATE_MAX;
-#endif /* CONFIG_PREEMPT */
-
-	return state;
+	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
 }
 #endif /* CREATE_TRACE_POINTS */
@@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
  */
 TRACE_EVENT(sched_switch,
 
-	TP_PROTO(struct task_struct *prev,
+	TP_PROTO(bool preempt,
+		 struct task_struct *prev,
 		 struct task_struct *next),
 
-	TP_ARGS(prev, next),
+	TP_ARGS(preempt, prev, next),
 
 	TP_STRUCT__entry(
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
@@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch,
 		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 		__entry->prev_pid	= prev->pid;
 		__entry->prev_prio	= prev->prio;
-		__entry->prev_state	= __trace_sched_switch_state(prev);
+		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
 		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 		__entry->next_pid	= next->pid;
 		__entry->next_prio	= next->prio;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2470,7 +2470,6 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
-	trace_sched_switch(prev, next);
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
@@ -3132,6 +3131,7 @@ static void __sched __schedule(bool preempt)
 		rq->curr = next;
 		++*switch_count;
 
+		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next); /* unlocks the rq */
 		cpu = cpu_of(rq);
 	} else {
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5697,7 +5697,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 }
 
 static void
-ftrace_graph_probe_sched_switch(void *ignore,
+ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
 			struct task_struct *prev, struct task_struct *next)
 {
 	unsigned long long timestamp;
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,7 +16,8 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 
 static void
-probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
+probe_sched_switch(void *ignore, bool preempt,
+		   struct task_struct *prev, struct task_struct *next)
 {
 	if (unlikely(!sched_ref))
 		return;
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -420,7 +420,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 }
 
 static void notrace
-probe_wakeup_sched_switch(void *ignore,
+probe_wakeup_sched_switch(void *ignore, bool preempt,
 			  struct task_struct *prev, struct task_struct *next)
 {
 	struct trace_array_cpu *data;
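
For any other sched_switch tracepoint consumer, the visible effect of this commit is that the probe now receives an extra bool preempt argument, exactly as the three in-tree tracers above were updated. Below is a minimal sketch of such a probe; it is illustrative only and not part of the commit: the my_probe_* names are made up, while the register/unregister helpers are the ones generated for the sched_switch tracepoint.

#include <linux/sched.h>
#include <trace/events/sched.h>

/* Hypothetical consumer: same shape as probe_sched_switch() above. */
static void my_probe_sched_switch(void *ignore, bool preempt,
				  struct task_struct *prev,
				  struct task_struct *next)
{
	/*
	 * @preempt replaces the old preempt_count() & PREEMPT_ACTIVE test:
	 * when true, @prev was preempted and is still effectively RUNNING,
	 * whatever prev->state says.
	 */
}

static int my_probe_register(void)
{
	/* Same registration pattern used by kernel/trace/trace_sched_switch.c. */
	return register_trace_sched_switch(my_probe_sched_switch, NULL);
}

static void my_probe_unregister(void)
{
	unregister_trace_sched_switch(my_probe_sched_switch, NULL);
}

As before, the probe's first argument is the private data pointer supplied at registration time; the new preempt flag is passed down from __schedule(), which after this commit is the only call site of trace_sched_switch().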