Commit c20942ce authored by Thomas Gleixner, committed by Borislav Petkov

x86/fpu/core: Convert to fpstate

Convert the rest of the core code to the new register storage mechanism in
preparation for dynamically sized buffers.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20211013145322.659456185@linutronix.de
parent 7e049e8b
...@@ -50,9 +50,9 @@ static inline void kernel_fpu_begin(void) ...@@ -50,9 +50,9 @@ static inline void kernel_fpu_begin(void)
} }
/* /*
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state. * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate.
* A context switch will (and softirq might) save CPU's FPU registers to * A context switch will (and softirq might) save CPU's FPU registers to
* fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
* a random state. * a random state.
* *
* local_bh_disable() protects against both preemption and soft interrupts * local_bh_disable() protects against both preemption and soft interrupts
......
...@@ -89,7 +89,7 @@ bool irq_fpu_usable(void) ...@@ -89,7 +89,7 @@ bool irq_fpu_usable(void)
EXPORT_SYMBOL(irq_fpu_usable); EXPORT_SYMBOL(irq_fpu_usable);
/* /*
* Save the FPU register state in fpu->state. The register state is * Save the FPU register state in fpu->fpstate->regs. The register state is
* preserved. * preserved.
* *
* Must be called with fpregs_lock() held. * Must be called with fpregs_lock() held.
...@@ -105,19 +105,19 @@ EXPORT_SYMBOL(irq_fpu_usable); ...@@ -105,19 +105,19 @@ EXPORT_SYMBOL(irq_fpu_usable);
void save_fpregs_to_fpstate(struct fpu *fpu) void save_fpregs_to_fpstate(struct fpu *fpu)
{ {
if (likely(use_xsave())) { if (likely(use_xsave())) {
os_xsave(&fpu->state.xsave); os_xsave(&fpu->fpstate->regs.xsave);
/* /*
* AVX512 state is tracked here because its use is * AVX512 state is tracked here because its use is
* known to slow the max clock speed of the core. * known to slow the max clock speed of the core.
*/ */
if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512) if (fpu->fpstate->regs.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
fpu->avx512_timestamp = jiffies; fpu->avx512_timestamp = jiffies;
return; return;
} }
if (likely(use_fxsr())) { if (likely(use_fxsr())) {
fxsave(&fpu->state.fxsave); fxsave(&fpu->fpstate->regs.fxsave);
return; return;
} }
...@@ -125,8 +125,8 @@ void save_fpregs_to_fpstate(struct fpu *fpu) ...@@ -125,8 +125,8 @@ void save_fpregs_to_fpstate(struct fpu *fpu)
* Legacy FPU register saving, FNSAVE always clears FPU registers, * Legacy FPU register saving, FNSAVE always clears FPU registers,
* so we have to reload them from the memory state. * so we have to reload them from the memory state.
*/ */
asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave)); asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
frstor(&fpu->state.fsave); frstor(&fpu->fpstate->regs.fsave);
} }
void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask) void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
...@@ -167,7 +167,8 @@ void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask) ...@@ -167,7 +167,8 @@ void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask)
if (save) { if (save) {
if (test_thread_flag(TIF_NEED_FPU_LOAD)) { if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
memcpy(&save->state, &current->thread.fpu.state, memcpy(&save->fpstate->regs,
&current->thread.fpu.fpstate->regs,
fpu_kernel_xstate_size); fpu_kernel_xstate_size);
} else { } else {
save_fpregs_to_fpstate(save); save_fpregs_to_fpstate(save);
...@@ -187,7 +188,7 @@ EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu); ...@@ -187,7 +188,7 @@ EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
void fpu_copy_fpstate_to_kvm_uabi(struct fpu *fpu, void *buf, void fpu_copy_fpstate_to_kvm_uabi(struct fpu *fpu, void *buf,
unsigned int size, u32 pkru) unsigned int size, u32 pkru)
{ {
union fpregs_state *kstate = &fpu->state; union fpregs_state *kstate = &fpu->fpstate->regs;
union fpregs_state *ustate = buf; union fpregs_state *ustate = buf;
struct membuf mb = { .p = buf, .left = size }; struct membuf mb = { .p = buf, .left = size };
...@@ -205,7 +206,7 @@ EXPORT_SYMBOL_GPL(fpu_copy_fpstate_to_kvm_uabi); ...@@ -205,7 +206,7 @@ EXPORT_SYMBOL_GPL(fpu_copy_fpstate_to_kvm_uabi);
int fpu_copy_kvm_uabi_to_fpstate(struct fpu *fpu, const void *buf, u64 xcr0, int fpu_copy_kvm_uabi_to_fpstate(struct fpu *fpu, const void *buf, u64 xcr0,
u32 *vpkru) u32 *vpkru)
{ {
union fpregs_state *kstate = &fpu->state; union fpregs_state *kstate = &fpu->fpstate->regs;
const union fpregs_state *ustate = buf; const union fpregs_state *ustate = buf;
struct pkru_state *xpkru; struct pkru_state *xpkru;
int ret; int ret;
...@@ -378,7 +379,7 @@ int fpu_clone(struct task_struct *dst) ...@@ -378,7 +379,7 @@ int fpu_clone(struct task_struct *dst)
*/ */
if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) { if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
/* Clear out the minimal state */ /* Clear out the minimal state */
memcpy(&dst_fpu->state, &init_fpstate.regs, memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
init_fpstate_copy_size()); init_fpstate_copy_size());
return 0; return 0;
} }
...@@ -389,11 +390,12 @@ int fpu_clone(struct task_struct *dst) ...@@ -389,11 +390,12 @@ int fpu_clone(struct task_struct *dst)
* child's FPU context, without any memory-to-memory copying. * child's FPU context, without any memory-to-memory copying.
*/ */
fpregs_lock(); fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD)) if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size); memcpy(&dst_fpu->fpstate->regs, &src_fpu->fpstate->regs,
fpu_kernel_xstate_size);
else } else {
save_fpregs_to_fpstate(dst_fpu); save_fpregs_to_fpstate(dst_fpu);
}
fpregs_unlock(); fpregs_unlock();
trace_x86_fpu_copy_src(src_fpu); trace_x86_fpu_copy_src(src_fpu);
...@@ -466,7 +468,7 @@ static void fpu_reset_fpstate(void) ...@@ -466,7 +468,7 @@ static void fpu_reset_fpstate(void)
* user space as PKRU is eagerly written in switch_to() and * user space as PKRU is eagerly written in switch_to() and
* flush_thread(). * flush_thread().
*/ */
memcpy(&fpu->state, &init_fpstate.regs, init_fpstate_copy_size()); memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
set_thread_flag(TIF_NEED_FPU_LOAD); set_thread_flag(TIF_NEED_FPU_LOAD);
fpregs_unlock(); fpregs_unlock();
} }
...@@ -493,7 +495,7 @@ void fpu__clear_user_states(struct fpu *fpu) ...@@ -493,7 +495,7 @@ void fpu__clear_user_states(struct fpu *fpu)
*/ */
if (xfeatures_mask_supervisor() && if (xfeatures_mask_supervisor() &&
!fpregs_state_valid(fpu, smp_processor_id())) { !fpregs_state_valid(fpu, smp_processor_id())) {
os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor()); os_xrstor(&fpu->fpstate->regs.xsave, xfeatures_mask_supervisor());
} }
/* Reset user states in registers. */ /* Reset user states in registers. */
...@@ -574,11 +576,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) ...@@ -574,11 +576,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
* fully reproduce the context of the exception. * fully reproduce the context of the exception.
*/ */
if (boot_cpu_has(X86_FEATURE_FXSR)) { if (boot_cpu_has(X86_FEATURE_FXSR)) {
cwd = fpu->state.fxsave.cwd; cwd = fpu->fpstate->regs.fxsave.cwd;
swd = fpu->state.fxsave.swd; swd = fpu->fpstate->regs.fxsave.swd;
} else { } else {
cwd = (unsigned short)fpu->state.fsave.cwd; cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
swd = (unsigned short)fpu->state.fsave.swd; swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
} }
err = swd & ~cwd; err = swd & ~cwd;
...@@ -592,7 +594,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) ...@@ -592,7 +594,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
unsigned short mxcsr = MXCSR_DEFAULT; unsigned short mxcsr = MXCSR_DEFAULT;
if (boot_cpu_has(X86_FEATURE_XMM)) if (boot_cpu_has(X86_FEATURE_XMM))
mxcsr = fpu->state.fxsave.mxcsr; mxcsr = fpu->fpstate->regs.fxsave.mxcsr;
err = ~(mxcsr >> 7) & mxcsr; err = ~(mxcsr >> 7) & mxcsr;
} }
......
...@@ -38,7 +38,7 @@ static void fpu__init_cpu_generic(void) ...@@ -38,7 +38,7 @@ static void fpu__init_cpu_generic(void)
/* Flush out any pending x87 state: */ /* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION #ifdef CONFIG_MATH_EMULATION
if (!boot_cpu_has(X86_FEATURE_FPU)) if (!boot_cpu_has(X86_FEATURE_FPU))
fpstate_init_soft(&current->thread.fpu.state.soft); fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
else else
#endif #endif
asm volatile ("fninit"); asm volatile ("fninit");
......
...@@ -1094,7 +1094,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave, ...@@ -1094,7 +1094,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave,
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode copy_mode) enum xstate_copy_mode copy_mode)
{ {
__copy_xstate_to_uabi_buf(to, &tsk->thread.fpu.state.xsave, __copy_xstate_to_uabi_buf(to, &tsk->thread.fpu.fpstate->regs.xsave,
tsk->thread.pkru, copy_mode); tsk->thread.pkru, copy_mode);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment