Commit 7e049e8b authored by Thomas Gleixner, committed by Borislav Petkov

x86/fpu/signal: Convert to fpstate

Convert signal related code to the new register storage mechanism in
preparation for dynamically sized buffers.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20211013145322.607370221@linutronix.de
parent caee31a3
...@@ -72,13 +72,13 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf, ...@@ -72,13 +72,13 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf) static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf)
{ {
if (use_fxsr()) { if (use_fxsr()) {
struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave;
struct user_i387_ia32_struct env; struct user_i387_ia32_struct env;
struct _fpstate_32 __user *fp = buf; struct _fpstate_32 __user *fp = buf;
fpregs_lock(); fpregs_lock();
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) if (!test_thread_flag(TIF_NEED_FPU_LOAD))
fxsave(&tsk->thread.fpu.state.fxsave); fxsave(&tsk->thread.fpu.fpstate->regs.fxsave);
fpregs_unlock(); fpregs_unlock();
convert_from_fxsr(&env, tsk); convert_from_fxsr(&env, tsk);
...@@ -303,7 +303,7 @@ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, ...@@ -303,7 +303,7 @@ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
* been restored from a user buffer directly. * been restored from a user buffer directly.
*/ */
if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor()) if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor()); os_xrstor(&fpu->fpstate->regs.xsave, xfeatures_mask_supervisor());
fpregs_mark_activate(); fpregs_mark_activate();
fpregs_unlock(); fpregs_unlock();
...@@ -317,6 +317,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, ...@@ -317,6 +317,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu; struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env; struct user_i387_ia32_struct env;
union fpregs_state *fpregs;
u64 user_xfeatures = 0; u64 user_xfeatures = 0;
bool fx_only = false; bool fx_only = false;
bool success; bool success;
...@@ -349,6 +350,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, ...@@ -349,6 +350,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
if (__copy_from_user(&env, buf, sizeof(env))) if (__copy_from_user(&env, buf, sizeof(env)))
return false; return false;
fpregs = &fpu->fpstate->regs;
/* /*
* By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
* not modified on context switch and that the xstate is considered * not modified on context switch and that the xstate is considered
...@@ -366,7 +368,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, ...@@ -366,7 +368,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
* the right place in memory. It's ia32 mode. Shrug. * the right place in memory. It's ia32 mode. Shrug.
*/ */
if (xfeatures_mask_supervisor()) if (xfeatures_mask_supervisor())
os_xsave(&fpu->state.xsave); os_xsave(&fpregs->xsave);
set_thread_flag(TIF_NEED_FPU_LOAD); set_thread_flag(TIF_NEED_FPU_LOAD);
} }
__fpu_invalidate_fpregs_state(fpu); __fpu_invalidate_fpregs_state(fpu);
...@@ -374,29 +376,29 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, ...@@ -374,29 +376,29 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
fpregs_unlock(); fpregs_unlock();
if (use_xsave() && !fx_only) { if (use_xsave() && !fx_only) {
if (copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx)) if (copy_sigframe_from_user_to_xstate(&fpregs->xsave, buf_fx))
return false; return false;
} else { } else {
if (__copy_from_user(&fpu->state.fxsave, buf_fx, if (__copy_from_user(&fpregs->fxsave, buf_fx,
sizeof(fpu->state.fxsave))) sizeof(fpregs->fxsave)))
return false; return false;
if (IS_ENABLED(CONFIG_X86_64)) { if (IS_ENABLED(CONFIG_X86_64)) {
/* Reject invalid MXCSR values. */ /* Reject invalid MXCSR values. */
if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask) if (fpregs->fxsave.mxcsr & ~mxcsr_feature_mask)
return false; return false;
} else { } else {
/* Mask invalid bits out for historical reasons (broken hardware). */ /* Mask invalid bits out for historical reasons (broken hardware). */
fpu->state.fxsave.mxcsr &= mxcsr_feature_mask; fpregs->fxsave.mxcsr &= mxcsr_feature_mask;
} }
/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */ /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
if (use_xsave()) if (use_xsave())
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; fpregs->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
} }
/* Fold the legacy FP storage */ /* Fold the legacy FP storage */
convert_to_fxsr(&fpu->state.fxsave, &env); convert_to_fxsr(&fpregs->fxsave, &env);
fpregs_lock(); fpregs_lock();
if (use_xsave()) { if (use_xsave()) {
...@@ -411,10 +413,10 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, ...@@ -411,10 +413,10 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
*/ */
u64 mask = user_xfeatures | xfeatures_mask_supervisor(); u64 mask = user_xfeatures | xfeatures_mask_supervisor();
fpu->state.xsave.header.xfeatures &= mask; fpregs->xsave.header.xfeatures &= mask;
success = !os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all); success = !os_xrstor_safe(&fpregs->xsave, xfeatures_mask_all);
} else { } else {
success = !fxrstor_safe(&fpu->state.fxsave); success = !fxrstor_safe(&fpregs->fxsave);
} }
if (likely(success)) if (likely(success))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment