Commit 62e160b5 authored by Benjamin Herrenschmidt, committed by Linus Torvalds

[PATCH] ppc32: arch code preempt fixes

I figured the best way to stop being bothered by users trying to run
preempt was to fix it ;)

Here's a first batch that closes some races we had when testing regs->msr
for the altivec or FPU enable bit and then calling the giveup_* function.
A preempt in between those two steps would have caused us to save a stale
altivec or FPU context.
parent 01472f19
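
The pattern applied throughout the diff below keeps the MSR bit test and the
matching giveup_*() call inside one preemption-free region. A minimal sketch of
the idea in plain C, assuming normal kernel context; the flush_fp_if_used()
helper is hypothetical and only illustrates the shape of the fix, it is not
part of the patch:

#include <linux/preempt.h>	/* preempt_disable() / preempt_enable() */
#include <linux/sched.h>	/* current */
#include <asm/ptrace.h>		/* struct pt_regs */
/* MSR_FP and giveup_fpu() come from the ppc arch headers. */

/*
 * Hypothetical helper, not part of the patch: flush the FPU state of the
 * current task if it is live.  Without the preempt_disable()/preempt_enable()
 * pair the task could be preempted between the MSR_FP test and giveup_fpu(),
 * and we would end up saving a stale FPU context.
 */
static void flush_fp_if_used(struct pt_regs *regs)
{
	preempt_disable();		/* test + giveup must be atomic wrt preemption */
	if (regs->msr & MSR_FP)		/* this task has live FPU state */
		giveup_fpu(current);	/* save it into current->thread */
	preempt_enable();
}
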
@@ -163,6 +163,7 @@ dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
 void
 enable_kernel_altivec(void)
 {
+	preempt_disable();
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
 		giveup_altivec(current);
@@ -171,12 +172,14 @@ enable_kernel_altivec(void)
 #else
 	giveup_altivec(last_task_used_altivec);
 #endif /* __SMP __ */
+	preempt_enable();
 }
 #endif /* CONFIG_ALTIVEC */
 void
 enable_kernel_fp(void)
 {
+	preempt_disable();
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
 		giveup_fpu(current);
@@ -185,13 +188,16 @@ enable_kernel_fp(void)
 #else
 	giveup_fpu(last_task_used_math);
 #endif /* CONFIG_SMP */
+	preempt_enable();
 }
 int
 dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 {
-	if (tsk->thread.regs && tsk->thread.regs->msr & MSR_FP)
+	preempt_disable();
+	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
 		giveup_fpu(tsk);
+	preempt_enable();
 	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
 	return 1;
 }
@@ -329,12 +335,14 @@ void prepare_to_copy(struct task_struct *tsk)
 	if (regs == NULL)
 		return;
+	preempt_disable();
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
 	if (regs->msr & MSR_VEC)
 		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
+	preempt_enable();
 }
 /*
@@ -479,12 +487,14 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
+	preempt_disable();
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
 	if (regs->msr & MSR_VEC)
 		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
+	preempt_enable();
 	error = do_execve(filename, (char __user *__user *) a1,
 			  (char __user *__user *) a2, regs);
 	if (error == 0)
@@ -391,14 +391,25 @@ check_bug_trap(struct pt_regs *regs)
 		return 0;
 	if (bug->line & BUG_WARNING_TRAP) {
 		/* this is a WARN_ON rather than BUG/BUG_ON */
+#ifdef CONFIG_XMON
+		xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
+			    bug->function, bug->file,
+			    bug->line & ~BUG_WARNING_TRAP);
+#endif /* CONFIG_XMON */
 		printk(KERN_ERR "Badness in %s at %s:%d\n",
 		       bug->function, bug->file,
 		       bug->line & ~BUG_WARNING_TRAP);
 		dump_stack();
 		return 1;
 	}
+#ifdef CONFIG_XMON
+	xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+		    bug->function, bug->file, bug->line);
+	xmon(regs);
+#endif /* CONFIG_XMON */
 	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
 	       bug->function, bug->file, bug->line);
 	return 0;
 }
@@ -427,8 +438,14 @@ ProgramCheckException(struct pt_regs *regs)
 	int code = 0;
 	u32 fpscr;
+	/* We must make sure the FP state is consistent with
+	 * our MSR_FP in regs
+	 */
+	preempt_disable();
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
+	preempt_enable();
 	fpscr = current->thread.fpscr;
 	fpscr &= fpscr << 22;	/* mask summary bits with enables */
 	if (fpscr & FPSCR_VX)
@@ -592,13 +609,17 @@ TAUException(struct pt_regs *regs)
 void
 AltivecAssistException(struct pt_regs *regs)
 {
+	preempt_disable();
 	if (regs->msr & MSR_VEC)
 		giveup_altivec(current);
+	preempt_enable();
 	/* XXX quick hack for now: set the non-Java bit in the VSCR */
 	current->thread.vscr.u[3] |= 0x10000;
 }
#endif /* CONFIG_ALTIVEC */
 void __init trap_init(void)
 {
 }