Commit a7d623d4 authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Move part of giveup_vsx into c

Move the MSR modification into c. Removing it from the assembly
function will allow us to avoid costly MSR writes by batching them
up.
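
The idea, as a minimal stand-alone C sketch (the MSR bit values and the
mtmsr_isync_sim() helper below are illustrative stand-ins, not the kernel's
definitions): OR every facility bit that will be needed into one new MSR
image first, then issue at most a single expensive MSR write instead of one
write per giveup function.

#include <stdint.h>
#include <stdio.h>

/* Illustrative facility bits, standing in for the powerpc MSR defines. */
#define MSR_FP  (1ULL << 13)
#define MSR_VEC (1ULL << 25)
#define MSR_VSX (1ULL << 23)

static uint64_t msr;		/* stands in for the machine state register */
static int msr_writes;		/* counts the costly writes we would issue */

/* Simulated mtmsrd + isync: the expensive operation being batched. */
static void mtmsr_isync_sim(uint64_t newmsr)
{
	msr = newmsr;
	msr_writes++;
}

int main(void)
{
	uint64_t oldmsr = msr;
	uint64_t newmsr = oldmsr | (MSR_FP | MSR_VEC | MSR_VSX);

	if (oldmsr != newmsr)		/* one write covers FP, VMX and VSX */
		mtmsr_isync_sim(newmsr);

	printf("MSR writes issued: %d\n", msr_writes);	/* prints 1, not 3 */
	return 0;
}

Compiled and run, this issues a single (simulated) write for all three
facilities, whereas the assembly helpers each did their own
mfmsr/mtmsrd/isync sequence.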

Check the FP and VMX bits before calling the relevant giveup_*()
function. This makes giveup_vsx() and flush_vsx_to_thread() perform
more like their sister functions, and allows us to use
flush_vsx_to_thread() in the signal code.
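
For reference, the flush_*_to_thread() helpers share roughly the following
shape (a sketch, not necessarily the verbatim kernel source), which is why
the signal code can call flush_vsx_to_thread() rather than __giveup_vsx()
directly: the helper only pays for the giveup when the facility is actually
live in the task's MSR.

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		/* Only give up state the task actually has live. */
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}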

Move the check_if_tm_restore_required() check in.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 98da581e
arch/powerpc/kernel/process.c
@@ -205,6 +205,25 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,15 +239,6 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-	check_if_tm_restore_required(tsk);
-	giveup_fpu(tsk);
-	giveup_altivec(tsk);
-	__giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
arch/powerpc/kernel/signal_32.c
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		if (msr & MSR_VSX) {
arch/powerpc/kernel/signal_64.c
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		err |= copy_vsx_to_user(v_regs, current);
 		/* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		tm_v_regs += ELF_NVRREG;
arch/powerpc/kernel/vector.S
@@ -177,14 +177,8 @@ _GLOBAL(load_up_vsx)
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-	mfmsr	r5
-	oris	r5,r5,MSR_VSX@h
-	mtmsrd	r5			/* enable use of VSX now */
-	isync
-
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	ld	r5,PT_REGS(r3)
 	cmpdi	0,r5,0