Commit d3ea9fa0 authored by Stuart Menefy, committed by Paul Mundt

sh: Minor optimisations to FPU handling

A number of small optimisations to FPU handling, in particular:

 - move the task USEDFPU flag from the thread_info flags field (which
   is accessed asynchronously to the thread) to a new status field,
   which is only accessed by the thread itself. This allows locking to
   be removed in most cases, or can be reduced to a preempt_lock().
   This mimics the i386 behaviour.

 - move the modification of regs->sr and thread_info->status flags out
   of save_fpu() to __unlazy_fpu(). This gives the compiler a better
   chance to optimise things, as well as making save_fpu() symmetrical
   with restore_fpu() and init_fpu().

 - implement prepare_to_copy(), so that when creating a thread, we can
   unlazy the FPU prior to copying the thread data structures.

Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads.
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 39ac11c1
...@@ -18,17 +18,14 @@ static inline void grab_fpu(struct pt_regs *regs) ...@@ -18,17 +18,14 @@ static inline void grab_fpu(struct pt_regs *regs)
struct task_struct; struct task_struct;
extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); extern void save_fpu(struct task_struct *__tsk);
void fpu_state_restore(struct pt_regs *regs); void fpu_state_restore(struct pt_regs *regs);
#else #else
#define save_fpu(tsk) do { } while (0)
#define release_fpu(regs) do { } while (0) #define release_fpu(regs) do { } while (0)
#define grab_fpu(regs) do { } while (0) #define grab_fpu(regs) do { } while (0)
static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
#endif #endif
struct user_regset; struct user_regset;
...@@ -40,21 +37,28 @@ extern int fpregs_get(struct task_struct *target, ...@@ -40,21 +37,28 @@ extern int fpregs_get(struct task_struct *target,
unsigned int pos, unsigned int count, unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf); void *kbuf, void __user *ubuf);
/*
 * Lazily flush the FPU context of @tsk without locking.
 *
 * If the task owns live FPU state (TS_USEDFPU set in its thread_info
 * status — a field only ever touched by the task itself, so no atomics
 * are needed), drop the flag, spill the registers into the task struct
 * via save_fpu(), and relinquish the FPU (release_fpu() sets SR.FD in
 * @regs).  Otherwise reset the lazy-restore heuristic counter.
 *
 * Callers must ensure preemption is disabled (see unlazy_fpu()).
 */
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
		tsk->fpu_counter = 0;
		return;
	}

	task_thread_info(tsk)->status &= ~TS_USEDFPU;
	save_fpu(tsk);
	release_fpu(regs);
}
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{ {
preempt_disable(); preempt_disable();
if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) __unlazy_fpu(tsk, regs);
save_fpu(tsk, regs);
else
tsk->fpu_counter = 0;
preempt_enable(); preempt_enable();
} }
static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
{ {
preempt_disable(); preempt_disable();
if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { if (task_thread_info(tsk)->status & TS_USEDFPU) {
clear_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status &= ~TS_USEDFPU;
release_fpu(regs); release_fpu(regs);
} }
preempt_enable(); preempt_enable();
......
...@@ -56,6 +56,7 @@ asmlinkage void __init sh_cpu_init(void); ...@@ -56,6 +56,7 @@ asmlinkage void __init sh_cpu_init(void);
#define SR_DSP 0x00001000 #define SR_DSP 0x00001000
#define SR_IMASK 0x000000f0 #define SR_IMASK 0x000000f0
#define SR_FD 0x00008000 #define SR_FD 0x00008000
#define SR_MD 0x40000000
/* /*
* DSP structure and data * DSP structure and data
...@@ -136,7 +137,7 @@ struct mm_struct; ...@@ -136,7 +137,7 @@ struct mm_struct;
extern void release_thread(struct task_struct *); extern void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */ /* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0) void prepare_to_copy(struct task_struct *tsk);
/* /*
* create a kernel thread without removing it from tasklists * create a kernel thread without removing it from tasklists
......
...@@ -51,6 +51,7 @@ struct thread_info { ...@@ -51,6 +51,7 @@ struct thread_info {
.task = &tsk, \ .task = &tsk, \
.exec_domain = &default_exec_domain, \ .exec_domain = &default_exec_domain, \
.flags = 0, \ .flags = 0, \
.status = 0, \
.cpu = 0, \ .cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \ .addr_limit = KERNEL_DS, \
...@@ -117,7 +118,6 @@ extern void free_thread_info(struct thread_info *ti); ...@@ -117,7 +118,6 @@ extern void free_thread_info(struct thread_info *ti);
#define TIF_SECCOMP 6 /* secure computing */ #define TIF_SECCOMP 6 /* secure computing */
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 #define TIF_MEMDIE 18
#define TIF_FREEZE 19 /* Freezing for suspend */ #define TIF_FREEZE 19 /* Freezing for suspend */
...@@ -130,7 +130,6 @@ extern void free_thread_info(struct thread_info *ti); ...@@ -130,7 +130,6 @@ extern void free_thread_info(struct thread_info *ti);
#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_USEDFPU (1 << TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_FREEZE (1 << TIF_FREEZE)
...@@ -163,6 +162,7 @@ extern void free_thread_info(struct thread_info *ti); ...@@ -163,6 +162,7 @@ extern void free_thread_info(struct thread_info *ti);
* have to worry about atomic accesses. * have to worry about atomic accesses.
*/ */
#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ #define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1 #define HAVE_SET_RESTORE_SIGMASK 1
......
...@@ -311,12 +311,12 @@ asmlinkage void __init sh_cpu_init(void) ...@@ -311,12 +311,12 @@ asmlinkage void __init sh_cpu_init(void)
if (fpu_disabled) { if (fpu_disabled) {
printk("FPU Disabled\n"); printk("FPU Disabled\n");
current_cpu_data.flags &= ~CPU_HAS_FPU; current_cpu_data.flags &= ~CPU_HAS_FPU;
disable_fpu();
} }
/* FPU initialization */ /* FPU initialization */
disable_fpu();
if ((current_cpu_data.flags & CPU_HAS_FPU)) { if ((current_cpu_data.flags & CPU_HAS_FPU)) {
clear_thread_flag(TIF_USEDFPU); current_thread_info()->status &= ~TS_USEDFPU;
clear_used_math(); clear_used_math();
} }
......
...@@ -25,14 +25,12 @@ ...@@ -25,14 +25,12 @@
/* /*
* Save FPU registers onto task structure. * Save FPU registers onto task structure.
* Assume called with FPU enabled (SR.FD=0).
*/ */
void void
save_fpu(struct task_struct *tsk, struct pt_regs *regs) save_fpu(struct task_struct *tsk)
{ {
unsigned long dummy; unsigned long dummy;
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu(); enable_fpu();
asm volatile("sts.l fpul, @-%0\n\t" asm volatile("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t" "sts.l fpscr, @-%0\n\t"
...@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs) ...@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
: "memory"); : "memory");
disable_fpu(); disable_fpu();
release_fpu(regs);
} }
static void static void
...@@ -598,13 +595,13 @@ BUILD_TRAP_HANDLER(fpu_error) ...@@ -598,13 +595,13 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current; struct task_struct *tsk = current;
TRAP_HANDLER_DECL; TRAP_HANDLER_DECL;
save_fpu(tsk, regs); __unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) { if (ieee_fpe_handler(regs)) {
tsk->thread.fpu.hard.fpscr &= tsk->thread.fpu.hard.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs); grab_fpu(regs);
restore_fpu(tsk); restore_fpu(tsk);
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
return; return;
} }
...@@ -630,5 +627,5 @@ BUILD_TRAP_HANDLER(fpu_state_restore) ...@@ -630,5 +627,5 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
fpu_init(); fpu_init();
set_used_math(); set_used_math();
} }
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
} }
...@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags; ...@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags;
/* /*
* Save FPU registers onto task structure. * Save FPU registers onto task structure.
* Assume called with FPU enabled (SR.FD=0).
*/ */
void save_fpu(struct task_struct *tsk, struct pt_regs *regs) void save_fpu(struct task_struct *tsk)
{ {
unsigned long dummy; unsigned long dummy;
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu(); enable_fpu();
asm volatile ("sts.l fpul, @-%0\n\t" asm volatile ("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t" "sts.l fpscr, @-%0\n\t"
...@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs) ...@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
:"memory"); :"memory");
disable_fpu(); disable_fpu();
release_fpu(regs);
} }
static void restore_fpu(struct task_struct *tsk) static void restore_fpu(struct task_struct *tsk)
...@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs) ...@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* fcnvsd */ /* fcnvsd */
struct task_struct *tsk = current; struct task_struct *tsk = current;
save_fpu(tsk, regs);
if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */ /* FPU error */
denormal_to_double(&tsk->thread.fpu.hard, denormal_to_double(&tsk->thread.fpu.hard,
...@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error) ...@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current; struct task_struct *tsk = current;
TRAP_HANDLER_DECL; TRAP_HANDLER_DECL;
save_fpu(tsk, regs); __unlazy_fpu(tsk, regs);
fpu_exception_flags = 0; fpu_exception_flags = 0;
if (ieee_fpe_handler(regs)) { if (ieee_fpe_handler(regs)) {
tsk->thread.fpu.hard.fpscr &= tsk->thread.fpu.hard.fpscr &=
...@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error) ...@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error)
tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
grab_fpu(regs); grab_fpu(regs);
restore_fpu(tsk); restore_fpu(tsk);
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
(fpu_exception_flags >> 2)) == 0) { (fpu_exception_flags >> 2)) == 0) {
return; return;
...@@ -502,7 +498,7 @@ void fpu_state_restore(struct pt_regs *regs) ...@@ -502,7 +498,7 @@ void fpu_state_restore(struct pt_regs *regs)
fpu_init(); fpu_init();
set_used_math(); set_used_math();
} }
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
tsk->fpu_counter++; tsk->fpu_counter++;
} }
......
...@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) ...@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
regs.regs[5] = (unsigned long)fn; regs.regs[5] = (unsigned long)fn;
regs.pc = (unsigned long)kernel_thread_helper; regs.pc = (unsigned long)kernel_thread_helper;
regs.sr = (1 << 30); regs.sr = SR_MD;
#if defined(CONFIG_SH_FPU)
regs.sr |= SR_FD;
#endif
/* Ok, create the new process.. */ /* Ok, create the new process.. */
pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
...@@ -189,6 +192,15 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) ...@@ -189,6 +192,15 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
} }
EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(dump_fpu);
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 *
 * Forces any lazily-held FPU state of @tsk back into its task_struct
 * (via unlazy_fpu() on the task's saved register frame), so that the
 * subsequent copy in copy_thread() sees up-to-date FPU contents rather
 * than stale values still living in the hardware registers.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk, task_pt_regs(tsk));
}
asmlinkage void ret_from_fork(void); asmlinkage void ret_from_fork(void);
int copy_thread(unsigned long clone_flags, unsigned long usp, int copy_thread(unsigned long clone_flags, unsigned long usp,
...@@ -197,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -197,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
{ {
struct thread_info *ti = task_thread_info(p); struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs; struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP) #if defined(CONFIG_SH_DSP)
struct task_struct *tsk = current; struct task_struct *tsk = current;
#endif #endif
#if defined(CONFIG_SH_FPU)
unlazy_fpu(tsk, regs);
p->thread.fpu = tsk->thread.fpu;
copy_to_stopped_child_used_math(p);
#endif
#if defined(CONFIG_SH_DSP) #if defined(CONFIG_SH_DSP)
if (is_dsp_enabled(tsk)) { if (is_dsp_enabled(tsk)) {
/* We can use the __save_dsp or just copy the struct: /* We can use the __save_dsp or just copy the struct:
...@@ -226,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -226,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
} else { } else {
childregs->regs[15] = (unsigned long)childregs; childregs->regs[15] = (unsigned long)childregs;
ti->addr_limit = KERNEL_DS; ti->addr_limit = KERNEL_DS;
ti->status &= ~TS_USEDFPU;
p->fpu_counter = 0;
} }
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
......
...@@ -558,7 +558,7 @@ static int ieee_fpe_handler(struct pt_regs *regs) ...@@ -558,7 +558,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
(finsn >> 8) & 0xf); (finsn >> 8) & 0xf);
tsk->thread.fpu.hard.fpscr &= tsk->thread.fpu.hard.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
} else { } else {
info.si_signo = SIGFPE; info.si_signo = SIGFPE;
info.si_errno = 0; info.si_errno = 0;
...@@ -619,10 +619,10 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) ...@@ -619,10 +619,10 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft); struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft);
if (!test_tsk_thread_flag(tsk, TIF_USEDFPU)) { if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */ /* initialize once. */
fpu_init(fpu); fpu_init(fpu);
set_tsk_thread_flag(tsk, TIF_USEDFPU); task_thread_info(tsk)->status |= TS_USEDFPU;
} }
return fpu_emulate(inst, fpu, regs); return fpu_emulate(inst, fpu, regs);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment