Commit d223a861 authored by Ralf Baechle

[MIPS] FP affinity: Coding style cleanups

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent e7c4782f
@@ -109,7 +109,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	read_unlock(&tasklist_lock);
 
 	/* Compute new global allowed CPU set if necessary */
-	if( (p->thread.mflags & MF_FPUBOUND)
-	 && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+	if ((p->thread.mflags & MF_FPUBOUND)
+	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
 		retval = set_cpus_allowed(p, effective_mask);
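The test above is plain set intersection: a task that is FPU-bound may only run where the requested mask and the FPU-capable mask overlap, and the effective mask is exactly that overlap. A standalone sketch of the same intersection using the glibc cpu_set_t API; the CPU numbers are invented for illustration and nothing here is taken from the patch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t requested, fpu_capable, effective;

	CPU_ZERO(&requested);
	CPU_ZERO(&fpu_capable);
	CPU_SET(0, &requested);		/* caller asked for CPUs 0-2 */
	CPU_SET(1, &requested);
	CPU_SET(2, &requested);
	CPU_SET(0, &fpu_capable);	/* assume only CPUs 0 and 1 have an FPU */
	CPU_SET(1, &fpu_capable);

	/* Same idea as cpus_and(effective_mask, new_mask, mt_fpu_cpumask) */
	CPU_AND(&effective, &requested, &fpu_capable);

	for (int cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, &effective))
			printf("effective mask contains CPU %d\n", cpu);
	return 0;
}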
@@ -195,17 +195,21 @@ void mips_mt_regdump(unsigned long mvpctl)
 	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	printk("-- per-VPE State --\n");
-	for(i = 0; i < nvpe; i++) {
-		for(tc = 0; tc < ntc; tc++) {
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
 			settc(tc);
-			if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
 				printk(" VPE %d\n", i);
-				printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-				printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
+				printk(" VPEControl : %08lx\n",
+				       read_vpe_c0_vpecontrol());
+				printk(" VPEConf0 : %08lx\n",
+				       read_vpe_c0_vpeconf0());
 				printk(" VPE%d.Status : %08lx\n",
 					i, read_vpe_c0_status());
-				printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-				printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
+				printk(" VPE%d.EPC : %08lx\n",
+				       i, read_vpe_c0_epc());
+				printk(" VPE%d.Cause : %08lx\n",
+				       i, read_vpe_c0_cause());
 				printk(" VPE%d.Config7 : %08lx\n",
 					i, read_vpe_c0_config7());
 				break; /* Next VPE */
@@ -213,9 +217,9 @@ void mips_mt_regdump(unsigned long mvpctl)
 		}
 	}
 	printk("-- per-TC State --\n");
-	for(tc = 0; tc < ntc; tc++) {
+	for (tc = 0; tc < ntc; tc++) {
 		settc(tc);
-		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
 			/* Are we dumping ourself? */
 			haltval = 0; /* Then we're not halted, and mustn't be */
 			tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -384,7 +388,7 @@ void mips_mt_set_cpuoptions(void)
 			mt_fpemul_threshold = fpaff_threshold;
 		} else {
 			mt_fpemul_threshold =
-				(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+				(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
 		}
 		printk("FPU Affinity set after %ld emulations\n",
 			mt_fpemul_threshold);
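The expression above scales the emulation threshold with CPU speed: loops_per_jiffy / (500000 / HZ) is roughly the BogoMIPS figure, so the threshold comes out to about FPUSEFACTOR * BogoMIPS / HZ emulated FP instructions per jiffy's worth of work. A small standalone sketch of the arithmetic, using made-up values for FPUSEFACTOR, HZ and loops_per_jiffy (the real values depend on the kernel configuration and the board, and are not taken from this patch):

#include <stdio.h>

/* Hypothetical values purely for illustration. */
#define FPUSEFACTOR	2000UL
#define HZ		100
static unsigned long loops_per_jiffy = 1250000;	/* ~250 BogoMIPS at HZ=100 */

int main(void)
{
	/* Same expression as in mips_mt_set_cpuoptions() */
	unsigned long threshold =
		(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;

	printf("FPU affinity would kick in after %lu emulations\n", threshold);
	return 0;
}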
...
@@ -752,6 +752,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	force_sig(SIGILL, current);
 }
 
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			cpus_and(tmask, current->thread.user_cpus_allowed,
+			         mt_fpu_cpumask);
+			set_cpus_allowed(current, tmask);
+			current->thread.mflags |= MF_FPUBOUND;
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
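The new helper does in-kernel, via set_cpus_allowed(), what a user task would do with sched_setaffinity(): once it has crossed the emulation threshold, shrink its allowed set to the CPUs that actually have FP hardware. A rough userspace analogue, assuming for illustration that CPUs 0 and 1 are the FPU-capable ones; that assumption and the function name are not part of the patch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Userspace sketch of the effect of mt_ase_fp_affinity(): bind the calling
 * thread to the CPUs assumed to have an FPU. */
static int bind_to_fpu_cpus(void)
{
	cpu_set_t fpu_cpus;

	CPU_ZERO(&fpu_cpus);
	CPU_SET(0, &fpu_cpus);	/* assumed FPU-capable CPUs for this example */
	CPU_SET(1, &fpu_cpus);

	if (sched_setaffinity(0, sizeof(fpu_cpus), &fpu_cpus) < 0) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}

int main(void)
{
	return bind_to_fpu_cpus() ? 1 : 0;
}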
@@ -785,36 +812,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 					       &current->thread.fpu, 0);
 			if (sig)
 				force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-			else {
-			/*
-			 * MIPS MT processors may have fewer FPU contexts
-			 * than CPU threads. If we've emulated more than
-			 * some threshold number of instructions, force
-			 * migration to a "CPU" that has FP support.
-			 */
-			 if(mt_fpemul_threshold > 0
-			 && ((current->thread.emulated_fp++
-				> mt_fpemul_threshold))) {
-			  /*
-			   * If there's no FPU present, or if the
-			   * application has already restricted
-			   * the allowed set to exclude any CPUs
-			   * with FPUs, we'll skip the procedure.
-			   */
-			  if (cpus_intersects(current->cpus_allowed,
-						mt_fpu_cpumask)) {
-				cpumask_t tmask;
-
-				cpus_and(tmask,
-					current->thread.user_cpus_allowed,
-					mt_fpu_cpumask);
-				set_cpus_allowed(current, tmask);
-				current->thread.mflags |= MF_FPUBOUND;
-			  }
-			 }
-			}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+			else
+				mt_ase_fp_affinity();
 		}
 
 		return;
...
@@ -44,7 +44,7 @@ struct task_struct;
  * different thread.
  */
 
-#define switch_to(prev,next,last)					\
+#define __mips_mt_fpaff_switch_to(prev)					\
 do {									\
 	if (cpu_has_fpu &&						\
 	    (prev->thread.mflags & MF_FPUBOUND) &&			\
@@ -52,24 +52,22 @@ do {									\
 		prev->thread.mflags &= ~MF_FPUBOUND;			\
 		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
 	}								\
-	if (cpu_has_dsp)						\
-		__save_dsp(prev);					\
 	next->thread.emulated_fp = 0;					\
-	(last) = resume(prev, next, task_thread_info(next));		\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 } while(0)
 
 #else
+#define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0)
+#endif
 
 #define switch_to(prev,next,last)					\
 do {									\
+	__mips_mt_fpaff_switch_to(prev);				\
 	if (cpu_has_dsp)						\
 		__save_dsp(prev);					\
 	(last) = resume(prev, next, task_thread_info(next));		\
 	if (cpu_has_dsp)						\
 		__restore_dsp(current);					\
 } while(0)
-#endif
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
...
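The shape of the result is a common kernel pattern: the feature-specific step is pulled out into its own hook macro, which collapses to a do-nothing form (still evaluating its argument) when the option is disabled, so the body of switch_to() reads the same either way. A minimal standalone illustration of that pattern with invented names; HAVE_FOO, foo_hook and do_switch are not from the patch:

#include <stdio.h>

/* Toggle this to see both expansions of the hook. */
#define HAVE_FOO 1

#if HAVE_FOO
#define foo_hook(x)							\
do {									\
	printf("foo hook ran for %d\n", (x));				\
} while (0)
#else
/* Still evaluates (x), so builds don't warn about an unused value. */
#define foo_hook(x) do { (void)(x); } while (0)
#endif

#define do_switch(x)							\
do {									\
	foo_hook(x);							\
	printf("common switch work for %d\n", (x));			\
} while (0)

int main(void)
{
	do_switch(42);
	return 0;
}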