Commit 7366ed77 authored by Ingo Molnar

x86/fpu: Simplify FPU handling by embedding the fpstate in task_struct (again)

So 6 years ago we made the FPU fpstate dynamically allocated:

  aa283f49 ("x86, fpu: lazy allocation of FPU area - v5")
  61c4628b ("x86, fpu: split FPU state from task struct - v5")

In hindsight this was a mistake:

   - it complicated context allocation failure handling, such as:

		/* kthread execs. TODO: cleanup this horror. */
		if (WARN_ON(fpstate_alloc_init(fpu)))
			force_sig(SIGKILL, tsk);

   - it caused us to enable irqs in fpu__restore():

                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (fpstate_alloc_init(fpu)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();

   - it (slightly) slowed down task creation/destruction by adding
     slab allocation/free patterns.

   - it made access to context contents (slightly) slower by adding
     one more pointer dereference (see the sketch below).
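
Purely for illustration, a minimal sketch of that extra dereference,
mirroring the get_fpu_cwd()/get_fpu_mxcsr() hunks further below:

	/* Before: 'state' was a separately allocated buffer: */
	mxcsr = tsk->thread.fpu.state->fxsave.mxcsr;	/* extra dependent load */

	/* After: 'state' is embedded in task_struct: */
	mxcsr = tsk->thread.fpu.state.fxsave.mxcsr;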

The motivation for the dynamic allocation was two-fold:

   - reduce memory consumption by non-FPU tasks

   - allocate and handle only the necessary amount of context for
     various XSAVE processors that have varying hardware frame
     sizes.

These days, with glibc using SSE memcpy by default and GCC optimizing
for SSE/AVX by default, the scope of FPU-using apps on an x86 system is
much larger than it was 6 years ago.

For example on a freshly installed Fedora 21 desktop system, with a
recent kernel, all non-kthread tasks have used the FPU shortly after
bootup.

Also, even modern embedded x86 CPUs try to support the latest vector
instruction set - so they too will often use the larger xstate frame
sizes.

So remove the dynamic allocation complication by embedding the FPU
fpstate in task_struct again. This should make the FPU a lot more
accessible to all sorts of atomic contexts.
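
Concretely, the whole change boils down to a single type change in
'struct fpu' (see the hunk below) - everything else in the diff is
adjusting '->' accesses to '.' accesses:

	struct fpu {
		...
		union thread_xstate	state;	/* was: union thread_xstate *state; */
		...
	};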

We could still optimize for the xstate frame size in the future,
by moving the state structure to the last element of task_struct,
and allocating only a part of that.
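
A rough sketch of that idea - purely illustrative, not part of this
patch, and the helper name and size calculation below are assumptions:

	/*
	 * If the FPU state were the last member of task_struct, boot
	 * code could size the task_struct slab to just the xstate
	 * frame the CPU actually needs (hypothetical):
	 */
	task_struct_slab_size =
		offsetof(struct task_struct, thread.fpu.state) + xstate_size;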

This change is kept minimal by still keeping the ctx_alloc()/free()
routines (that now do nothing substantial) - we'll remove them in
the following patches.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1bc6b056
@@ -232,9 +232,9 @@ static inline int frstor_user(struct i387_fsave_struct __user *fx)
 static inline void fpu_fxsave(struct fpu *fpu)
 {
 	if (config_enabled(CONFIG_X86_32))
-		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -251,15 +251,15 @@ static inline void fpu_fxsave(struct fpu *fpu)
 		 * an extended register is needed for addressing (fix submitted
 		 * to mainline 2005-11-21).
 		 *
-		 * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 * asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
 		 *
 		 * This, however, we can work around by forcing the compiler to
 		 * select an addressing mode that doesn't require extended
 		 * registers.
 		 */
 		asm volatile( "rex64/fxsave (%[fx])"
-			     : "=m" (fpu->state->fxsave)
-			     : [fx] "R" (&fpu->state->fxsave));
+			     : "=m" (fpu->state.fxsave)
+			     : [fx] "R" (&fpu->state.fxsave));
 	}
 }
@@ -276,7 +276,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
-		xsave_state(&fpu->state->xsave);
+		xsave_state(&fpu->state.xsave);
 		return 1;
 	}
@@ -289,7 +289,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
 	 * so we have to mark them inactive:
 	 */
-	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave));
+	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state.fsave));
 	return 0;
 }
@@ -299,11 +299,11 @@ extern void fpu__save(struct fpu *fpu);
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(&fpu->state->xsave);
+		return fpu_xrstor_checking(&fpu->state.xsave);
 	else if (use_fxsr())
-		return fxrstor_checking(&fpu->state->fxsave);
+		return fxrstor_checking(&fpu->state.fxsave);
 	else
-		return frstor_checking(&fpu->state->fsave);
+		return frstor_checking(&fpu->state.fsave);
 }
 
 static inline int restore_fpu_checking(struct fpu *fpu)
@@ -454,7 +454,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 		if (fpu.preload) {
 			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
-			prefetch(new_fpu->state);
+			prefetch(&new_fpu->state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
@@ -465,7 +465,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 		if (fpu_want_lazy_restore(new_fpu, cpu))
 			fpu.preload = 0;
 		else
-			prefetch(new_fpu->state);
+			prefetch(&new_fpu->state);
 		fpregs_activate(new_fpu);
 	}
 }
@@ -534,25 +534,25 @@ static inline void user_fpu_begin(void)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.cwd;
+		return tsk->thread.fpu.state.fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+		return (unsigned short)tsk->thread.fpu.state.fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.swd;
+		return tsk->thread.fpu.state.fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+		return (unsigned short)tsk->thread.fpu.state.fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.fpu.state->fxsave.mxcsr;
+		return tsk->thread.fpu.state.fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
...
@@ -143,7 +143,7 @@ struct fpu {
 	unsigned int last_cpu;
 	unsigned int fpregs_active;
-	union thread_xstate *state;
+	union thread_xstate state;
 	/*
 	 * This counter contains the number of consecutive context switches
 	 * during which the FPU stays used. If this is over a threshold, the
...
@@ -174,9 +174,9 @@ static void __save_fpu(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		if (unlikely(system_state == SYSTEM_BOOTING))
-			xsave_state_booting(&fpu->state->xsave);
+			xsave_state_booting(&fpu->state.xsave);
 		else
-			xsave_state(&fpu->state->xsave);
+			xsave_state(&fpu->state.xsave);
 	} else {
 		fpu_fxsave(fpu);
 	}
@@ -207,16 +207,16 @@ EXPORT_SYMBOL_GPL(fpu__save);
 void fpstate_init(struct fpu *fpu)
 {
 	if (!cpu_has_fpu) {
-		finit_soft_fpu(&fpu->state->soft);
+		finit_soft_fpu(&fpu->state.soft);
 		return;
 	}
 
-	memset(fpu->state, 0, xstate_size);
+	memset(&fpu->state, 0, xstate_size);
 
 	if (cpu_has_fxsr) {
-		fx_finit(&fpu->state->fxsave);
+		fx_finit(&fpu->state.fxsave);
 	} else {
-		struct i387_fsave_struct *fp = &fpu->state->fsave;
+		struct i387_fsave_struct *fp = &fpu->state.fsave;
 		fp->cwd = 0xffff037fu;
 		fp->swd = 0xffff0000u;
 		fp->twd = 0xffffffffu;
@@ -241,15 +241,8 @@ void fpstate_cache_init(void)
 int fpstate_alloc(struct fpu *fpu)
 {
-	if (fpu->state)
-		return 0;
-
-	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-	if (!fpu->state)
-		return -ENOMEM;
-
 	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-	WARN_ON((unsigned long)fpu->state & 15);
+	WARN_ON((unsigned long)&fpu->state & 15);
 
 	return 0;
 }
@@ -257,10 +250,6 @@ EXPORT_SYMBOL_GPL(fpstate_alloc);
 void fpstate_free(struct fpu *fpu)
 {
-	if (fpu->state) {
-		kmem_cache_free(task_xstate_cachep, fpu->state);
-		fpu->state = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(fpstate_free);
@@ -277,11 +266,11 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	WARN_ON(src_fpu != &current->thread.fpu);
 
 	if (use_eager_fpu()) {
-		memset(&dst_fpu->state->xsave, 0, xstate_size);
+		memset(&dst_fpu->state.xsave, 0, xstate_size);
 		__save_fpu(dst_fpu);
 	} else {
 		fpu__save(src_fpu);
-		memcpy(dst_fpu->state, src_fpu->state, xstate_size);
+		memcpy(&dst_fpu->state, &src_fpu->state, xstate_size);
 	}
 }
@@ -289,7 +278,6 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
 	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
-	dst_fpu->state = NULL;
 	dst_fpu->last_cpu = -1;
 
 	if (src_fpu->fpstate_active) {
@@ -483,7 +471,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 	sanitize_i387_state(target);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &fpu->state->fxsave, 0, -1);
+				   &fpu->state.fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -503,19 +491,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	sanitize_i387_state(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &fpu->state->fxsave, 0, -1);
+				 &fpu->state.fxsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;
+	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
 
 	/*
 	 * update the header bits in the xsave header, indicating the
 	 * presence of FP and SSE state.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;
+		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
 
 	return ret;
 }
@@ -535,7 +523,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
-	xsave = &fpu->state->xsave;
+	xsave = &fpu->state.xsave;
 
 	/*
 	 * Copy the 48bytes defined by the software first into the xstate
@@ -566,7 +554,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
-	xsave = &fpu->state->xsave;
+	xsave = &fpu->state.xsave;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
 
 	/*
@@ -657,7 +645,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -695,7 +683,7 @@ void convert_to_fxsr(struct task_struct *tsk,
 		     const struct user_i387_ia32_struct *env)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
 	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -736,7 +724,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (!cpu_has_fxsr)
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &fpu->state->fsave, 0,
+					   &fpu->state.fsave, 0,
 					   -1);
 
 	sanitize_i387_state(target);
@@ -770,7 +758,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!cpu_has_fxsr)
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &fpu->state->fsave, 0,
+					  &fpu->state.fsave, 0,
 					  -1);
 
 	if (pos > 0 || count < sizeof(env))
@@ -785,7 +773,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xfeatures |= XSTATE_FP;
+		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
 
 	return ret;
 }
...
@@ -44,14 +44,14 @@ static unsigned int xfeatures_nr;
  */
 void __sanitize_i387_state(struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state.fxsave;
 	int feature_bit;
 	u64 xfeatures;
 
 	if (!fx)
 		return;
 
-	xfeatures = tsk->thread.fpu.state->xsave.header.xfeatures;
+	xfeatures = tsk->thread.fpu.state.xsave.header.xfeatures;
 
 	/*
 	 * None of the feature bits are in init state. So nothing else
@@ -147,7 +147,7 @@ static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
 static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
 {
 	if (use_fxsr()) {
-		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
 		struct user_i387_ia32_struct env;
 		struct _fpstate_ia32 __user *fp = buf;
@@ -245,7 +245,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
  */
 int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 {
-	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
+	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
@@ -288,7 +288,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
 			 struct user_i387_ia32_struct *ia32_env,
 			 u64 xfeatures, int fx_only)
 {
-	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
 	struct xstate_header *header = &xsave->header;
 
 	if (use_xsave()) {
@@ -402,7 +402,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 */
 		drop_fpu(fpu);
 
-		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
 		    __copy_from_user(&env, buf, sizeof(env))) {
 			fpstate_init(fpu);
 			err = -1;
...
@@ -396,7 +396,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 	 * do an xsave and then pull it out of the xsave buffer.
 	 */
 	copy_fpregs_to_fpstate(&tsk->thread.fpu);
-	xsave_buf = &(tsk->thread.fpu.state->xsave);
+	xsave_buf = &(tsk->thread.fpu.state.xsave);
 	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
 	if (!bndcsr)
 		goto exit_trap;
...
@@ -3196,7 +3196,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
-	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state.xsave;
 	u64 xstate_bv = xsave->header.xfeatures;
 	u64 valid;
@@ -3232,7 +3232,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
 {
-	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state.xsave;
 	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
 	u64 valid;
@@ -3277,7 +3277,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 		fill_xsave((u8 *) guest_xsave->region, vcpu);
 	} else {
 		memcpy(guest_xsave->region,
-			&vcpu->arch.guest_fpu.state->fxsave,
+			&vcpu->arch.guest_fpu.state.fxsave,
 			sizeof(struct i387_fxsave_struct));
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
 			XSTATE_FPSSE;
@@ -3302,7 +3302,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 	} else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct i387_fxsave_struct));
 	}
 	return 0;
@@ -6973,7 +6973,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct i387_fxsave_struct *fxsave =
-			&vcpu->arch.guest_fpu.state->fxsave;
+			&vcpu->arch.guest_fpu.state.fxsave;
 
 	memcpy(fpu->fpr, fxsave->st_space, 128);
 	fpu->fcw = fxsave->cwd;
@@ -6990,7 +6990,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct i387_fxsave_struct *fxsave =
-			&vcpu->arch.guest_fpu.state->fxsave;
+			&vcpu->arch.guest_fpu.state.fxsave;
 
 	memcpy(fxsave->st_space, fpu->fpr, 128);
 	fxsave->cwd = fpu->fcw;
@@ -7014,7 +7014,7 @@ int fx_init(struct kvm_vcpu *vcpu)
 	fpstate_init(&vcpu->arch.guest_fpu);
 	if (cpu_has_xsaves)
-		vcpu->arch.guest_fpu.state->xsave.header.xcomp_bv =
+		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
 	/*
...
@@ -52,7 +52,7 @@ void finit_soft_fpu(struct i387_soft_struct *soft)
 void finit(void)
 {
-	finit_soft_fpu(&current->thread.fpu.state->soft);
+	finit_soft_fpu(&current->thread.fpu.state.soft);
 }
 
 /*
...
@@ -683,7 +683,7 @@ int fpregs_soft_set(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    const void *kbuf, const void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state.soft;
 	void *space = s387->st_space;
 	int ret;
 	int offset, other, i, tags, regnr, tag, newtop;
@@ -735,7 +735,7 @@ int fpregs_soft_get(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    void *kbuf, void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state.soft;
 	const void *space = s387->st_space;
 	int ret;
 	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
...
@@ -31,7 +31,7 @@
 #define SEG_EXPAND_DOWN(s)	(((s).b & ((1 << 11) | (1 << 10))) \
 				 == (1 << 10))
 
-#define I387			(current->thread.fpu.state)
+#define I387			(&current->thread.fpu.state)
 #define FPU_info		(I387->soft.info)
 
 #define FPU_CS			(*(unsigned short *) &(FPU_info->regs->cs))
...
@@ -358,7 +358,7 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
 	 * only accessible if we first do an xsave.
 	 */
 	copy_fpregs_to_fpstate(&tsk->thread.fpu);
-	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
+	bndcsr = get_xsave_addr(&tsk->thread.fpu.state.xsave, XSTATE_BNDCSR);
 	if (!bndcsr)
 		return MPX_INVALID_BOUNDS_DIR;
...