Commit fbaa6a18 authored by Elliot Berman, committed by Ingo Molnar

sched/core: Remove ifdeffery for saved_state

In preparation for freezer to also use saved_state, remove the
CONFIG_PREEMPT_RT compilation guard around saved_state.

On the arm64 platform I tested which did not have CONFIG_PREEMPT_RT,
there was no statistically significant deviation by applying this patch.

Test methodology:

perf bench sched message -g 40 -l 40
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4ff34ad3
...@@ -750,10 +750,8 @@ struct task_struct { ...@@ -750,10 +750,8 @@ struct task_struct {
#endif #endif
unsigned int __state; unsigned int __state;
#ifdef CONFIG_PREEMPT_RT
/* saved state for "spinlock sleepers" */ /* saved state for "spinlock sleepers" */
unsigned int saved_state; unsigned int saved_state;
#endif
/* /*
* This begins the randomizable portion of task_struct. Only * This begins the randomizable portion of task_struct. Only
......
...@@ -2232,23 +2232,20 @@ int __task_state_match(struct task_struct *p, unsigned int state) ...@@ -2232,23 +2232,20 @@ int __task_state_match(struct task_struct *p, unsigned int state)
if (READ_ONCE(p->__state) & state) if (READ_ONCE(p->__state) & state)
return 1; return 1;
#ifdef CONFIG_PREEMPT_RT
if (READ_ONCE(p->saved_state) & state) if (READ_ONCE(p->saved_state) & state)
return -1; return -1;
#endif
return 0; return 0;
} }
static __always_inline static __always_inline
int task_state_match(struct task_struct *p, unsigned int state) int task_state_match(struct task_struct *p, unsigned int state)
{ {
#ifdef CONFIG_PREEMPT_RT
/* /*
* Serialize against current_save_and_set_rtlock_wait_state() and * Serialize against current_save_and_set_rtlock_wait_state() and
* current_restore_rtlock_saved_state(). * current_restore_rtlock_saved_state().
*/ */
guard(raw_spinlock_irq)(&p->pi_lock); guard(raw_spinlock_irq)(&p->pi_lock);
#endif
return __task_state_match(p, state); return __task_state_match(p, state);
} }
...@@ -4038,7 +4035,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) ...@@ -4038,7 +4035,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
*success = !!(match = __task_state_match(p, state)); *success = !!(match = __task_state_match(p, state));
#ifdef CONFIG_PREEMPT_RT
/* /*
* Saved state preserves the task state across blocking on * Saved state preserves the task state across blocking on
* an RT lock. If the state matches, set p::saved_state to * an RT lock. If the state matches, set p::saved_state to
...@@ -4054,7 +4050,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) ...@@ -4054,7 +4050,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
*/ */
if (match < 0) if (match < 0)
p->saved_state = TASK_RUNNING; p->saved_state = TASK_RUNNING;
#endif
return match > 0; return match > 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment