Commit 762a1f43 authored by Paul Burton, committed by Ralf Baechle

MIPS: disable preemption whilst initialising MSA

Preemption must be disabled throughout the process of enabling the FPU,
enabling MSA & initialising the vector registers. Without doing so it
is possible to lose the FPU or MSA whilst initialising them causing
that initialisation to fail.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/7307/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 3587ea88
...@@ -164,8 +164,6 @@ static inline int init_fpu(void) ...@@ -164,8 +164,6 @@ static inline int init_fpu(void)
{ {
int ret = 0; int ret = 0;
preempt_disable();
if (cpu_has_fpu) { if (cpu_has_fpu) {
ret = __own_fpu(); ret = __own_fpu();
if (!ret) if (!ret)
...@@ -173,8 +171,6 @@ static inline int init_fpu(void) ...@@ -173,8 +171,6 @@ static inline int init_fpu(void)
} else } else
fpu_emulator_init_fpu(); fpu_emulator_init_fpu();
preempt_enable();
return ret; return ret;
} }
......
...@@ -1093,6 +1093,7 @@ static int enable_restore_fp_context(int msa) ...@@ -1093,6 +1093,7 @@ static int enable_restore_fp_context(int msa)
if (!used_math()) { if (!used_math()) {
/* First time FP context user. */ /* First time FP context user. */
preempt_disable();
err = init_fpu(); err = init_fpu();
if (msa && !err) { if (msa && !err) {
enable_msa(); enable_msa();
...@@ -1100,6 +1101,7 @@ static int enable_restore_fp_context(int msa) ...@@ -1100,6 +1101,7 @@ static int enable_restore_fp_context(int msa)
set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE); set_thread_flag(TIF_MSA_CTX_LIVE);
} }
preempt_enable();
if (!err) if (!err)
set_used_math(); set_used_math();
return err; return err;
...@@ -1139,10 +1141,11 @@ static int enable_restore_fp_context(int msa) ...@@ -1139,10 +1141,11 @@ static int enable_restore_fp_context(int msa)
* This task is using or has previously used MSA. Thus we require * This task is using or has previously used MSA. Thus we require
* that Status.FR == 1. * that Status.FR == 1.
*/ */
preempt_disable();
was_fpu_owner = is_fpu_owner(); was_fpu_owner = is_fpu_owner();
err = own_fpu(0); err = own_fpu_inatomic(0);
if (err) if (err)
return err; goto out;
enable_msa(); enable_msa();
write_msa_csr(current->thread.fpu.msacsr); write_msa_csr(current->thread.fpu.msacsr);
...@@ -1158,7 +1161,8 @@ static int enable_restore_fp_context(int msa) ...@@ -1158,7 +1161,8 @@ static int enable_restore_fp_context(int msa)
prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
if (!prior_msa && was_fpu_owner) { if (!prior_msa && was_fpu_owner) {
_init_msa_upper(); _init_msa_upper();
return 0;
goto out;
} }
if (!prior_msa) { if (!prior_msa) {
...@@ -1182,6 +1186,10 @@ static int enable_restore_fp_context(int msa) ...@@ -1182,6 +1186,10 @@ static int enable_restore_fp_context(int msa)
if (!was_fpu_owner) if (!was_fpu_owner)
asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
} }
out:
preempt_enable();
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment