Commit c2ff9e9a authored by Sebastian Andrzej Siewior, committed by Borislav Petkov

x86/fpu: Merge the two code paths in __fpu__restore_sig()

The ia32_fxstate case (32-bit with fxsr) and the other case (64-bit frames or
32-bit frames without fxsr) both restore from kernel memory and sanitize
the content.

The !ia32_fxstate version restores missing xstates from the "init state",
while the ia32_fxstate version skips this step.

Merge the two code paths and keep the !ia32_fxstate one. In the ia32_fxstate
case, copy only the user_i387_ia32_struct data structure.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-23-bigeasy@linutronix.de
parent 926b21f3
...@@ -263,12 +263,17 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_ ...@@ -263,12 +263,17 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{ {
struct user_i387_ia32_struct *envp = NULL;
int state_size = fpu_kernel_xstate_size;
int ia32_fxstate = (buf != buf_fx); int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu; struct fpu *fpu = &tsk->thread.fpu;
int state_size = fpu_kernel_xstate_size; struct user_i387_ia32_struct env;
union fpregs_state *state;
u64 xfeatures = 0; u64 xfeatures = 0;
int fx_only = 0; int fx_only = 0;
int ret = 0;
void *tmp;
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION)); IS_ENABLED(CONFIG_IA32_EMULATION));
...@@ -303,56 +308,24 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) ...@@ -303,56 +308,24 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} }
} }
if (ia32_fxstate) {
/*
* For 32-bit frames with fxstate, copy the user state to the
* thread's fpu state, reconstruct fxstate from the fsave
* header. Validate and sanitize the copied state.
*/
struct user_i387_ia32_struct env;
union fpregs_state *state;
int err = 0;
void *tmp;
tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL); tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
if (!tmp) if (!tmp)
return -ENOMEM; return -ENOMEM;
state = PTR_ALIGN(tmp, 64); state = PTR_ALIGN(tmp, 64);
if (using_compacted_format()) { if ((unsigned long)buf_fx % 64)
err = copy_user_to_xstate(&state->xsave, buf_fx); fx_only = 1;
} else {
err = __copy_from_user(&state->xsave, buf_fx, state_size);
if (!err && state_size > offsetof(struct xregs_state, header))
err = validate_xstate_header(&state->xsave.header);
}
if (err || __copy_from_user(&env, buf, sizeof(env))) {
err = -1;
} else {
sanitize_restored_xstate(state, &env, xfeatures, fx_only);
copy_kernel_to_fpregs(state);
}
kfree(tmp);
return err;
} else {
union fpregs_state *state;
void *tmp;
int ret;
tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
state = PTR_ALIGN(tmp, 64);
/* /*
* For 64-bit frames and 32-bit fsave frames, restore the user * For 32-bit frames with fxstate, copy the fxstate so it can be
* state to the registers directly (with exceptions handled). * reconstructed later.
*/ */
if ((unsigned long)buf_fx % 64) if (ia32_fxstate) {
fx_only = 1; ret = __copy_from_user(&env, buf, sizeof(env));
if (ret)
goto err_out;
envp = &env;
}
if (use_xsave() && !fx_only) { if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask & ~xfeatures; u64 init_bv = xfeatures_mask & ~xfeatures;
...@@ -368,7 +341,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) ...@@ -368,7 +341,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (ret) if (ret)
goto err_out; goto err_out;
sanitize_restored_xstate(state, NULL, xfeatures, fx_only); sanitize_restored_xstate(state, envp, xfeatures, fx_only);
if (unlikely(init_bv)) if (unlikely(init_bv))
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
...@@ -379,11 +352,11 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) ...@@ -379,11 +352,11 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (ret) if (ret)
goto err_out; goto err_out;
sanitize_restored_xstate(state, envp, xfeatures, fx_only);
if (use_xsave()) { if (use_xsave()) {
u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE; u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
} }
state->fxsave.mxcsr &= mxcsr_feature_mask;
ret = copy_kernel_to_fxregs_err(&state->fxsave); ret = copy_kernel_to_fxregs_err(&state->fxsave);
} else { } else {
...@@ -395,13 +368,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) ...@@ -395,13 +368,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
err_out: err_out:
kfree(tmp); kfree(tmp);
if (ret) { if (ret)
fpu__clear(fpu); fpu__clear(fpu);
return -1; return ret;
}
}
return 0;
} }
static inline int xstate_sigframe_size(void) static inline int xstate_sigframe_size(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment