Commit 5bff44fc authored by Linus Torvalds

Be a lot more careful about TS_USEDFPU and preemption

We had some races where we tested (or set) TS_USEDFPU together
with sequences that depended on the setting (like clearing or
setting the TS flag in %cr0) and we could be preempted in between,
which screws up the FPU state, since preemption will itself change
USEDFPU and the TS flag.

This makes it a lot more explicit: the "internal" low-level FPU
functions ("__xxxx_fpu()") all require preemption to be disabled,
and the exported "real" functions will make sure that is the case.

One case - in __switch_to() - was switched to the non-preempt-safe
internal version, since the scheduler itself has already disabled
preemption.
parent 3d9dd6d2
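
To make the race concrete, here is a minimal sketch (illustrative only, not code from this commit) of the pattern the old exported helpers allowed; it assumes the i387.h helpers shown in the diff below, and the function name is invented:

/*
 * Sketch of the race: __save_init_fpu() clears TS_USEDFPU and stts()
 * sets the TS bit in %cr0, but with preemption enabled a context
 * switch can run between the two steps. The switch itself saves the
 * FPU state, updates TS_USEDFPU and toggles TS, so the late stts()
 * then clobbers the new owner's view of the FPU.
 */
static inline void save_init_fpu_racy(struct task_struct *tsk)
{
	__save_init_fpu(tsk);	/* step 1: save state, clear TS_USEDFPU */
	/* preemption point: schedule() may switch FPU ownership here */
	stts();			/* step 2: set %cr0.TS -- now stale */
}

The diff below makes the split explicit: the __-prefixed internal helpers assume the caller has already disabled preemption, and the exported names wrap them in preempt_disable()/preempt_enable().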
@@ -452,7 +452,7 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-	unlazy_fpu(prev_p);
+	__unlazy_fpu(prev_p);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -745,7 +745,8 @@ asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  *
- * Must be called with kernel preemption disabled.
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
  */
 asmlinkage void math_state_restore(struct pt_regs regs)
 {
@@ -26,7 +26,9 @@ extern void restore_fpu( struct task_struct *tsk );
 extern void kernel_fpu_begin(void);
 #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
 
+/*
+ * These must be called with preempt disabled
+ */
 static inline void __save_init_fpu( struct task_struct *tsk )
 {
 	if ( cpu_has_fxsr ) {
@@ -39,19 +41,12 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 	tsk->thread_info->status &= ~TS_USEDFPU;
 }
 
-static inline void save_init_fpu( struct task_struct *tsk )
-{
-	__save_init_fpu(tsk);
-	stts();
-}
-
-#define unlazy_fpu( tsk ) do { \
+#define __unlazy_fpu( tsk ) do { \
 	if ((tsk)->thread_info->status & TS_USEDFPU) \
 		save_init_fpu( tsk ); \
 } while (0)
 
-#define clear_fpu( tsk ) \
+#define __clear_fpu( tsk ) \
 do { \
 	if ((tsk)->thread_info->status & TS_USEDFPU) { \
 		asm volatile("fwait"); \
@@ -60,6 +55,30 @@ do { \
 	} \
 } while (0)
 
+/*
+ * These disable preemption on their own and are safe
+ */
+static inline void save_init_fpu( struct task_struct *tsk )
+{
+	preempt_disable();
+	__save_init_fpu(tsk);
+	stts();
+	preempt_enable();
+}
+
+#define unlazy_fpu( tsk ) do { \
+	preempt_disable(); \
+	__unlazy_fpu(tsk); \
+	preempt_enable(); \
+} while (0)
+
+#define clear_fpu( tsk ) do { \
+	preempt_disable(); \
+	__clear_fpu( tsk ); \
+	preempt_enable(); \
+} while (0)
+
 /*
  * FPU state interaction...
  */
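
As a usage sketch (both callers below are hypothetical, invented for illustration): ordinary kernel code uses the exported wrappers, which pin the CPU themselves, while contexts that already run with preemption disabled, such as the scheduler's __switch_to() above, may call the raw __-prefixed versions and skip a redundant preempt_disable()/preempt_enable() pair.

/* Hypothetical caller: safe from any context, since the exported
 * wrapper disables preemption around the whole test-and-save. */
static void drop_task_fpu(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

/* Hypothetical scheduler-side caller: preemption is already off
 * (as in __switch_to()), so the internal version is sufficient. */
static void switch_out_fpu(struct task_struct *prev_p)
{
	__unlazy_fpu(prev_p);
}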