Commit d223a861, authored Jul 10, 2007 by Ralf Baechle
[MIPS] FP affinity: Coding style cleanups

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent e7c4782f
Showing 3 changed files with 56 additions and 55 deletions (+56 -55)
arch/mips/kernel/mips-mt.c    +22 -18
arch/mips/kernel/traps.c      +29 -30
include/asm-mips/system.h     +5  -7
arch/mips/kernel/mips-mt.c
@@ -109,7 +109,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	read_unlock(&tasklist_lock);

 	/* Compute new global allowed CPU set if necessary */
-	if( (p->thread.mflags & MF_FPUBOUND)
+	if ((p->thread.mflags & MF_FPUBOUND)
 	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
 		retval = set_cpus_allowed(p, effective_mask);
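The hunk above sits in mipsmt_sys_sched_setaffinity(), where the kernel intersects the mask requested from userspace with mt_fpu_cpumask for FPU-bound tasks. As an illustration of the userspace side of that call (not part of this commit; the CPU count below is made up), a caller could request a wide mask and, on a kernel built with CONFIG_MIPS_MT_FPAFF, still end up effectively restricted to the FPU-capable VPEs:

    /* Illustrative only: request CPUs 0-3 on a hypothetical 4-VPE system.
     * On a MIPS MT kernel with CONFIG_MIPS_MT_FPAFF, an FPU-bound task may
     * effectively be limited to the subset of these CPUs that have an FPU
     * (mt_fpu_cpumask). */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            for (int cpu = 0; cpu < 4; cpu++)   /* hypothetical CPU count */
                    CPU_SET(cpu, &mask);
            if (sched_setaffinity(0, sizeof(mask), &mask) != 0)
                    perror("sched_setaffinity");
            return 0;
    }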
@@ -195,17 +195,21 @@ void mips_mt_regdump(unsigned long mvpctl)
 	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	printk("-- per-VPE State --\n");
-	for(i = 0; i < nvpe; i++) {
-	    for(tc = 0; tc < ntc; tc++) {
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
 			settc(tc);
-		if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
 				printk("  VPE %d\n", i);
-				printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-				printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
+				printk("   VPEControl : %08lx\n",
+				       read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n",
+				       read_vpe_c0_vpeconf0());
 				printk("   VPE%d.Status : %08lx\n",
 				       i, read_vpe_c0_status());
-				printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-				printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
+				printk("   VPE%d.EPC : %08lx\n",
+				       i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n",
+				       i, read_vpe_c0_cause());
 				printk("   VPE%d.Config7 : %08lx\n",
 				       i, read_vpe_c0_config7());
 				break; /* Next VPE */
@@ -213,9 +217,9 @@ void mips_mt_regdump(unsigned long mvpctl)
 		}
 	}
 	printk("-- per-TC State --\n");
-	for(tc = 0; tc < ntc; tc++) {
+	for (tc = 0; tc < ntc; tc++) {
 		settc(tc);
-		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
 			/* Are we dumping ourself? */
 			haltval = 0; /* Then we're not halted, and mustn't be */
 			tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -384,7 +388,7 @@ void mips_mt_set_cpuoptions(void)
 			mt_fpemul_threshold = fpaff_threshold;
 		} else {
 			mt_fpemul_threshold =
-				(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+				(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
 		}
 		printk("FPU Affinity set after %ld emulations\n",
 			mt_fpemul_threshold);
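For a rough sense of what mt_fpemul_threshold works out to: loops_per_jiffy / (500000 / HZ) is essentially the CPU's BogoMIPS rating, so the default threshold scales with CPU speed. The snippet below only evaluates the expression with assumed numbers; the FPUSEFACTOR, HZ and loops_per_jiffy values are illustrative, not taken from this commit:

    /* Illustrative only: evaluate the mt_fpemul_threshold formula with
     * assumed values.  FPUSEFACTOR, HZ and loops_per_jiffy below are made
     * up for the example. */
    #include <stdio.h>

    int main(void)
    {
            const long HZ = 100;                  /* assumed timer frequency */
            const long FPUSEFACTOR = 2000;        /* assumed scaling factor */
            const long loops_per_jiffy = 2000000; /* assumed (~400 BogoMIPS) */

            long threshold =
                    (FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
            /* 2000 * (2000000 / 5000) / 100 = 2000 * 400 / 100 = 8000 */
            printf("mt_fpemul_threshold = %ld emulations\n", threshold);
            return 0;
    }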
arch/mips/kernel/traps.c
@@ -752,6 +752,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	force_sig(SIGILL, current);
 }

+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			cpus_and(tmask, current->thread.user_cpus_allowed,
+			         mt_fpu_cpumask);
+			set_cpus_allowed(current, tmask);
+			current->thread.mflags |= MF_FPUBOUND;
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
@@ -785,36 +812,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 					       &current->thread.fpu, 0);
 		if (sig)
 			force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-		else {
-			/*
-			 * MIPS MT processors may have fewer FPU contexts
-			 * than CPU threads. If we've emulated more than
-			 * some threshold number of instructions, force
-			 * migration to a "CPU" that has FP support.
-			 */
-			if (mt_fpemul_threshold > 0 &&
-			    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
-				/*
-				 * If there's no FPU present, or if the
-				 * application has already restricted
-				 * the allowed set to exclude any CPUs
-				 * with FPUs, we'll skip the procedure.
-				 */
-				if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
-					cpumask_t tmask;
-
-					cpus_and(tmask,
-						 current->thread.user_cpus_allowed,
-						 mt_fpu_cpumask);
-					set_cpus_allowed(current, tmask);
-					current->thread.mflags |= MF_FPUBOUND;
-				}
-			}
-		}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+		else
+			mt_ase_fp_affinity();
 	}

 	return;
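The refactored do_cpu() path now defers to mt_ase_fp_affinity() after FP emulation. As a sketch of how the effect could be observed from userspace (a hypothetical probe, not part of this commit), a task could do a burst of floating-point work and then re-read its affinity mask; on a MIPS MT kernel with CONFIG_MIPS_MT_FPAFF and no FPU on the current VPE, the mask would be expected to shrink to the FPU-capable CPUs once the emulation count passes mt_fpemul_threshold:

    /* Hypothetical probe, not part of this commit: print the affinity mask
     * before and after a burst of floating-point work.  On a MIPS MT kernel
     * with CONFIG_MIPS_MT_FPAFF the mask may narrow to FPU-capable CPUs once
     * enough FP instructions have been emulated. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    static void print_mask(const char *when)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            if (sched_getaffinity(0, sizeof(set), &set) == 0) {
                    printf("%s:", when);
                    for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
                            if (CPU_ISSET(cpu, &set))
                                    printf(" %d", cpu);
                    printf("\n");
            }
    }

    int main(void)
    {
            volatile double x = 1.0;

            print_mask("before FP work");
            for (long i = 0; i < 10 * 1000 * 1000; i++)
                    x = x * 1.0000001 + 0.5;  /* FP ops; emulated on an FPU-less VPE */
            print_mask("after FP work");
            return 0;
    }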
include/asm-mips/system.h
@@ -44,7 +44,7 @@ struct task_struct;
  * different thread.
  */
-#define switch_to(prev,next,last) \
+#define __mips_mt_fpaff_switch_to(prev) \
 do { \
 	if (cpu_has_fpu && \
 	    (prev->thread.mflags & MF_FPUBOUND) && \
@@ -52,24 +52,22 @@ do { \
 		prev->thread.mflags &= ~MF_FPUBOUND; \
 		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
 	} \
-	if (cpu_has_dsp) \
-		__save_dsp(prev); \
 	next->thread.emulated_fp = 0; \
-	(last) = resume(prev, next, task_thread_info(next)); \
-	if (cpu_has_dsp) \
-		__restore_dsp(current); \
 } while(0)

 #else
+#define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0)
+#endif

 #define switch_to(prev,next,last) \
 do { \
+	__mips_mt_fpaff_switch_to(prev); \
 	if (cpu_has_dsp) \
 		__save_dsp(prev); \
 	(last) = resume(prev, next, task_thread_info(next)); \
 	if (cpu_has_dsp) \
 		__restore_dsp(current); \
 } while(0)
-#endif

 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
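To make the intent of the system.h change concrete: with CONFIG_MIPS_MT_FPAFF disabled, __mips_mt_fpaff_switch_to(prev) is a no-op that merely evaluates prev, so the single switch_to() definition serves both configurations. A rough sketch of what switch_to(prev, next, last) expands to in that case (illustrative, not verbatim preprocessor output):

    /* Approximate expansion of switch_to(prev, next, last) when
     * CONFIG_MIPS_MT_FPAFF is disabled: the FP-affinity hook reduces to a
     * statement that only evaluates prev, while the DSP save/restore and
     * resume() sequence is shared with the FPAFF configuration. */
    do {
            do { (prev); } while (0);       /* __mips_mt_fpaff_switch_to(prev) */
            if (cpu_has_dsp)
                    __save_dsp(prev);
            (last) = resume(prev, next, task_thread_info(next));
            if (cpu_has_dsp)
                    __restore_dsp(current);
    } while (0)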