Commit b860eb8d authored by Fenghua Yu, committed by Borislav Petkov

x86/fpu/xstate: Define new functions for clearing fpregs and xstates

Currently, fpu__clear() clears all fpregs and xstates.  Once XSAVES
supervisor states are introduced, supervisor settings (e.g. CET xstates)
must remain active for signals, so it is necessary to have separate functions:

- Create fpu__clear_user_states(): clear only user settings, for signals;
- Create fpu__clear_all(): clear both user and supervisor settings, in
  flush_thread().

Also modify copy_init_fpstate_to_fpregs() to take a mask from the above two
functions.

While at it, remove an obvious side-comment in fpu__clear().
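
A purely illustrative, userspace-only sketch of the mask semantics described
above (not part of the patch): the DEMO_* names and bit values are placeholders,
not the kernel's real xfeatures_mask_user()/xfeatures_mask_all values. It only
shows that the signal path passes a user-only mask while flush_thread() passes
the full mask.

  /* Hypothetical demo of which state groups a given features mask would
   * reinitialize; bit layout is made up for illustration. */
  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_XFEATURES_USER        0x0000000000000007ull  /* placeholder user states       */
  #define DEMO_XFEATURES_SUPERVISOR  0x0000000000000800ull  /* placeholder supervisor states */
  #define DEMO_XFEATURES_ALL         (DEMO_XFEATURES_USER | DEMO_XFEATURES_SUPERVISOR)

  /* Stand-in for copy_init_fpstate_to_fpregs(features_mask): report which
   * groups of states the given mask would reset to their init values. */
  static void demo_clear(const char *entry_point, uint64_t features_mask)
  {
          printf("%-24s reinitializes user: %-3s supervisor: %s\n", entry_point,
                 (features_mask & DEMO_XFEATURES_USER) ? "yes" : "no",
                 (features_mask & DEMO_XFEATURES_SUPERVISOR) ? "yes" : "no");
  }

  int main(void)
  {
          /* Signal paths pass only the user mask, so supervisor state survives. */
          demo_clear("fpu__clear_user_states:", DEMO_XFEATURES_USER);
          /* flush_thread() passes the full mask and resets everything. */
          demo_clear("fpu__clear_all:", DEMO_XFEATURES_ALL);
          return 0;
  }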

 [ bp: Make the second argument of fpu__clear() bool after requesting it
   a bunch of times during review.
  - Add a comment about copy_init_fpstate_to_fpregs() locking needs. ]
Co-developed-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200512145444.15483-6-yu-cheng.yu@intel.com
parent 71581eef
@@ -31,7 +31,8 @@ extern void fpu__save(struct fpu *fpu);
 extern int fpu__restore_sig(void __user *buf, int ia32_frame);
 extern void fpu__drop(struct fpu *fpu);
 extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
-extern void fpu__clear(struct fpu *fpu);
+extern void fpu__clear_user_states(struct fpu *fpu);
+extern void fpu__clear_all(struct fpu *fpu);
 extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
...
@@ -291,15 +291,13 @@ void fpu__drop(struct fpu *fpu)
 }
 
 /*
- * Clear FPU registers by setting them up from
- * the init fpstate:
+ * Clear FPU registers by setting them up from the init fpstate.
+ * Caller must do fpregs_[un]lock() around it.
  */
-static inline void copy_init_fpstate_to_fpregs(void)
+static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
 {
-        fpregs_lock();
-
         if (use_xsave())
-                copy_kernel_to_xregs(&init_fpstate.xsave, -1);
+                copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
         else if (static_cpu_has(X86_FEATURE_FXSR))
                 copy_kernel_to_fxregs(&init_fpstate.fxsave);
         else
@@ -307,9 +305,6 @@ static inline void copy_init_fpstate_to_fpregs(void)
 
         if (boot_cpu_has(X86_FEATURE_OSPKE))
                 copy_init_pkru_to_fpregs();
-
-        fpregs_mark_activate();
-        fpregs_unlock();
 }
 
 /*
@@ -318,18 +313,40 @@ static inline void copy_init_fpstate_to_fpregs(void)
  * Called by sys_execve(), by the signal handler code and by various
  * error paths.
  */
-void fpu__clear(struct fpu *fpu)
+static void fpu__clear(struct fpu *fpu, bool user_only)
 {
-        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+        WARN_ON_FPU(fpu != &current->thread.fpu);
 
-        fpu__drop(fpu);
+        if (!static_cpu_has(X86_FEATURE_FPU)) {
+                fpu__drop(fpu);
+                fpu__initialize(fpu);
+                return;
+        }
 
-        /*
-         * Make sure fpstate is cleared and initialized.
-         */
-        fpu__initialize(fpu);
-        if (static_cpu_has(X86_FEATURE_FPU))
-                copy_init_fpstate_to_fpregs();
+        fpregs_lock();
+
+        if (user_only) {
+                if (!fpregs_state_valid(fpu, smp_processor_id()) &&
+                    xfeatures_mask_supervisor())
+                        copy_kernel_to_xregs(&fpu->state.xsave,
+                                             xfeatures_mask_supervisor());
+                copy_init_fpstate_to_fpregs(xfeatures_mask_user());
+        } else {
+                copy_init_fpstate_to_fpregs(xfeatures_mask_all);
+        }
+
+        fpregs_mark_activate();
+        fpregs_unlock();
+}
+
+void fpu__clear_user_states(struct fpu *fpu)
+{
+        fpu__clear(fpu, true);
+}
+
+void fpu__clear_all(struct fpu *fpu)
+{
+        fpu__clear(fpu, false);
 }
 
 /*
...
@@ -289,7 +289,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                          IS_ENABLED(CONFIG_IA32_EMULATION));
 
         if (!buf) {
-                fpu__clear(fpu);
+                fpu__clear_user_states(fpu);
                 return 0;
         }
 
@@ -416,7 +416,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 
 err_out:
         if (ret)
-                fpu__clear(fpu);
+                fpu__clear_user_states(fpu);
         return ret;
 }
 
...
@@ -191,7 +191,7 @@ void flush_thread(void)
         flush_ptrace_hw_breakpoint(tsk);
         memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 
-        fpu__clear(&tsk->thread.fpu);
+        fpu__clear_all(&tsk->thread.fpu);
 }
 
 void disable_TSC(void)
...
@@ -732,7 +732,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                 /*
                  * Ensure the signal handler starts with the new fpu state.
                  */
-                fpu__clear(fpu);
+                fpu__clear_user_states(fpu);
         }
         signal_setup_done(failed, ksig, stepping);
 }
...