Commit 96c79b6b authored by Benjamin Herrenschmidt's avatar Benjamin Herrenschmidt Committed by Michael Ellerman

powerpc: Remove more redundant VSX save/tests

__giveup_vsx/save_vsx are completely equivalent to testing MSR_FP
and MSR_VEC and calling the corresponding giveup/save function so
just remove the spurious VSX cases. Also add WARN_ONs checking that
we never have VSX enabled without the other two.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent dc801081
...@@ -355,14 +355,6 @@ static void giveup_vsx(struct task_struct *tsk) ...@@ -355,14 +355,6 @@ static void giveup_vsx(struct task_struct *tsk)
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
} }
/*
 * Save the VSX state of @tsk to its thread struct.
 *
 * The VSX register file overlaps the FP and Altivec (VMX) register
 * files, so saving VSX state boils down to a save_fpu() plus a
 * save_altivec(), gated on the corresponding MSR bits.  This is why
 * the patch removes this helper as redundant: callers can test
 * MSR_FP/MSR_VEC and call the two save functions directly.
 *
 * NOTE(review): assumes tsk->thread.regs is non-NULL here — the
 * callers visible in this diff appear to guarantee that; confirm.
 */
static void save_vsx(struct task_struct *tsk)
{
if (tsk->thread.regs->msr & MSR_FP)
save_fpu(tsk);
if (tsk->thread.regs->msr & MSR_VEC)
save_altivec(tsk);
}
void enable_kernel_vsx(void) void enable_kernel_vsx(void)
{ {
unsigned long cpumsr; unsigned long cpumsr;
...@@ -411,7 +403,6 @@ static int restore_vsx(struct task_struct *tsk) ...@@ -411,7 +403,6 @@ static int restore_vsx(struct task_struct *tsk)
} }
#else #else
static inline int restore_vsx(struct task_struct *tsk) { return 0; } static inline int restore_vsx(struct task_struct *tsk) { return 0; }
static inline void save_vsx(struct task_struct *tsk) { }
#endif /* CONFIG_VSX */ #endif /* CONFIG_VSX */
#ifdef CONFIG_SPE #ifdef CONFIG_SPE
...@@ -491,6 +482,8 @@ void giveup_all(struct task_struct *tsk) ...@@ -491,6 +482,8 @@ void giveup_all(struct task_struct *tsk)
msr_check_and_set(msr_all_available); msr_check_and_set(msr_all_available);
check_if_tm_restore_required(tsk); check_if_tm_restore_required(tsk);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP) if (usermsr & MSR_FP)
__giveup_fpu(tsk); __giveup_fpu(tsk);
...@@ -499,10 +492,6 @@ void giveup_all(struct task_struct *tsk) ...@@ -499,10 +492,6 @@ void giveup_all(struct task_struct *tsk)
if (usermsr & MSR_VEC) if (usermsr & MSR_VEC)
__giveup_altivec(tsk); __giveup_altivec(tsk);
#endif #endif
#ifdef CONFIG_VSX
if (usermsr & MSR_VSX)
__giveup_vsx(tsk);
#endif
#ifdef CONFIG_SPE #ifdef CONFIG_SPE
if (usermsr & MSR_SPE) if (usermsr & MSR_SPE)
__giveup_spe(tsk); __giveup_spe(tsk);
...@@ -561,19 +550,13 @@ void save_all(struct task_struct *tsk) ...@@ -561,19 +550,13 @@ void save_all(struct task_struct *tsk)
msr_check_and_set(msr_all_available); msr_check_and_set(msr_all_available);
/* WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
* Saving the way the register space is in hardware, save_vsx boils
* down to a save_fpu() and save_altivec()
*/
if (usermsr & MSR_VSX) {
save_vsx(tsk);
} else {
if (usermsr & MSR_FP)
save_fpu(tsk);
if (usermsr & MSR_VEC) if (usermsr & MSR_FP)
save_altivec(tsk); save_fpu(tsk);
}
if (usermsr & MSR_VEC)
save_altivec(tsk);
if (usermsr & MSR_SPE) if (usermsr & MSR_SPE)
__giveup_spe(tsk); __giveup_spe(tsk);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment