Commit 31de1877 authored by David Mosberger

ia64: Fix SMP fph-handling.

Patch by Asit Mallick with some additional changes by yours truly.
parent 051cbd81
@@ -378,7 +378,7 @@ copy_thread (int nr, unsigned long clone_flags,
 # define THREAD_FLAGS_TO_SET 0
         p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
                            | THREAD_FLAGS_TO_SET);
-        p->thread.last_fph_cpu = -1;
+        ia64_drop_fpu(p);       /* don't pick up stale state from a CPU's fph */
 #ifdef CONFIG_IA32_SUPPORT
         /*
          * If we're cloning an IA32 task then save the IA32 extra
@@ -606,11 +606,7 @@ flush_thread (void)
 {
         /* drop floating-point and debug-register state if it exists: */
         current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
-#ifndef CONFIG_SMP
-        if (ia64_get_fpu_owner() == current)
-                ia64_set_fpu_owner(0);
-#endif
+        ia64_drop_fpu(current);
 }
 #ifdef CONFIG_PERFMON
@@ -648,10 +644,7 @@ release_thread (struct task_struct *task)
 void
 exit_thread (void)
 {
-#ifndef CONFIG_SMP
-        if (ia64_get_fpu_owner() == current)
-                ia64_set_fpu_owner(0);
-#endif
+        ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
         /* if needed, stop monitoring and flush state to perfmon context */
         if (current->thread.pfm_context)
@@ -598,18 +598,13 @@ inline void
 ia64_flush_fph (struct task_struct *task)
 {
         struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
-#ifdef CONFIG_SMP
-        struct task_struct *fpu_owner = current;
-#else
-        struct task_struct *fpu_owner = ia64_get_fpu_owner();
-#endif
-        if (task == fpu_owner && psr->mfh) {
+        if (ia64_is_local_fpu_owner(task) && psr->mfh) {
                 psr->mfh = 0;
-                ia64_save_fpu(&task->thread.fph[0]);
                 task->thread.flags |= IA64_THREAD_FPH_VALID;
-                task->thread.last_fph_cpu = smp_processor_id();
+                ia64_save_fpu(&task->thread.fph[0]);
         }
+        ia64_drop_fpu(task);
 }
 /*
@@ -628,11 +623,8 @@ ia64_sync_fph (struct task_struct *task)
         ia64_flush_fph(task);
         if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
                 task->thread.flags |= IA64_THREAD_FPH_VALID;
-                task->thread.last_fph_cpu = -1;         /* force reload */
                 memset(&task->thread.fph, 0, sizeof(task->thread.fph));
         }
-        if (ia64_get_fpu_owner() == task)
-                ia64_set_fpu_owner(0);
         psr->dfh = 1;
 }
@@ -143,11 +143,11 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
                 __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
                 psr->mfh = 0;   /* drop signal handler's fph contents... */
                 if (psr->dfh)
-                        current->thread.last_fph_cpu = -1;
+                        ia64_drop_fpu(current);
                 else {
+                        /* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
                         __ia64_load_fpu(current->thread.fph);
-                        ia64_set_fpu_owner(current);
-                        current->thread.last_fph_cpu = smp_processor_id();
+                        ia64_set_local_fpu_owner(current);
                 }
         }
         return err;
......
@@ -247,7 +247,8 @@ disabled_fph_fault (struct pt_regs *regs)
         psr->dfh = 0;
 #ifndef CONFIG_SMP
         {
-                struct task_struct *fpu_owner = ia64_get_fpu_owner();
+                struct task_struct *fpu_owner
+                        = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
                 if (fpu_owner == current)
                         return;
@@ -256,7 +257,7 @@ disabled_fph_fault (struct pt_regs *regs)
                         ia64_flush_fph(fpu_owner);
         }
 #endif /* !CONFIG_SMP */
-        ia64_set_fpu_owner(current);
+        ia64_set_local_fpu_owner(current);
         if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
                 __ia64_load_fpu(current->thread.fph);
                 psr->mfh = 0;
@@ -417,17 +417,28 @@ ia64_set_kr (unsigned long regnum, unsigned long r)
         }
 }
-static inline struct task_struct *
-ia64_get_fpu_owner (void)
-{
-        return (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
-}
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
-static inline void
-ia64_set_fpu_owner (struct task_struct *t)
-{
-        ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) t);
-}
+/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+#define ia64_is_local_fpu_owner(t) \
+({ \
+        struct task_struct *__ia64_islfo_task = (t); \
+        (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+         && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+})
+
+/* Mark task T as owning the fph partition of the CPU we're running on. */
+#define ia64_set_local_fpu_owner(t) do { \
+        struct task_struct *__ia64_slfo_task = (t); \
+        __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
+        ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs. */
+#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
 extern void __ia64_init_fpu (void);
 extern void __ia64_save_fpu (struct ia64_fpreg *fph);
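The three macros above implement a two-sided ownership test: a task owns the live fph registers only if its last_fph_cpu matches the current CPU and the per-CPU IA64_KR_FPU_OWNER kernel register still points back at that task. That is what lets ia64_drop_fpu() invalidate a task's fph everywhere by touching only the task's own field. The user-space sketch below is illustrative only, not code from the patch: cpu_fpu_owner[] and this_cpu are hypothetical stand-ins for the kernel register and smp_processor_id(), and struct task stands in for task_struct.

/*
 * Minimal user-space model of the ownership protocol implemented by the
 * macros above.  cpu_fpu_owner[] models the per-CPU IA64_KR_FPU_OWNER
 * kernel register; this_cpu models smp_processor_id().
 */
#include <stdio.h>

#define NR_CPUS 2

struct task {
        int last_fph_cpu;       /* CPU whose fph last held this task's state, or -1 */
};

static struct task *cpu_fpu_owner[NR_CPUS];     /* last task to load fph on each CPU */
static int this_cpu;                            /* stand-in for smp_processor_id() */

/* Both halves must agree: the task last used fph here AND nobody loaded fph since. */
static int is_local_fpu_owner(struct task *t)
{
        return t->last_fph_cpu == this_cpu && cpu_fpu_owner[this_cpu] == t;
}

static void set_local_fpu_owner(struct task *t)
{
        t->last_fph_cpu = this_cpu;
        cpu_fpu_owner[this_cpu] = t;
}

/* Invalidate t's fph state on all CPUs by touching only t's own field. */
static void drop_fpu(struct task *t)
{
        t->last_fph_cpu = -1;
}

int main(void)
{
        struct task a = { .last_fph_cpu = -1 }, b = { .last_fph_cpu = -1 };

        this_cpu = 0;
        set_local_fpu_owner(&a);
        printf("A owns cpu0 fph: %d\n", is_local_fpu_owner(&a));       /* 1 */

        set_local_fpu_owner(&b);        /* another task loads fph on cpu 0 */
        printf("A owns cpu0 fph: %d\n", is_local_fpu_owner(&a));       /* 0: owner register moved on */

        this_cpu = 1;                   /* B migrates to cpu 1 */
        printf("B owns cpu1 fph: %d\n", is_local_fpu_owner(&b));       /* 0: last_fph_cpu is still 0 */

        drop_fpu(&b);                   /* e.g. flush_thread() or exit_thread() */
        printf("B owns any fph:  %d\n", is_local_fpu_owner(&b));       /* 0 */
        return 0;
}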
@@ -217,13 +217,11 @@ extern void ia64_load_extra (struct task_struct *task);
          || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
 #define __switch_to(prev,next,last) do { \
-        struct task_struct *__fpu_owner = ia64_get_fpu_owner(); \
         if (IA64_HAS_EXTRA_STATE(prev)) \
                 ia64_save_extra(prev); \
         if (IA64_HAS_EXTRA_STATE(next)) \
                 ia64_load_extra(next); \
-        ia64_psr(ia64_task_regs(next))->dfh = \
-                !(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \
+        ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
         (last) = ia64_switch_to((next)); \
 } while (0)
@@ -239,7 +237,6 @@ extern void ia64_load_extra (struct task_struct *task);
                 ia64_psr(ia64_task_regs(prev))->mfh = 0; \
                 (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
                 __ia64_save_fpu((prev)->thread.fph); \
-                (prev)->thread.last_fph_cpu = smp_processor_id(); \
         } \
         __switch_to(prev, next, last); \
 } while (0)