Commit cb8818b6 authored by Ingo Molnar

x86/fpu: Use 'struct fpu' in switch_fpu_prepare()

Migrate this function to pure 'struct fpu' usage.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent af2d94fd
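
In short, the helper stops taking 'struct task_struct *' arguments and takes pointers to the embedded FPU state instead, with callers passing &task->thread.fpu directly. Summarized from the hunks below:

	/* Old prototype: */
	static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu);

	/* New prototype: */
	static inline fpu_switch_t
	switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu);

	/* Call sites in __switch_to() now pass the embedded FPU state: */
	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
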
@@ -402,10 +402,9 @@ static inline void fpu_reset_state(struct fpu *fpu)
  */
 typedef struct { int preload; } fpu_switch_t;
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 {
-	struct fpu *old_fpu = &old->thread.fpu;
-	struct fpu *new_fpu = &new->thread.fpu;
 	fpu_switch_t fpu;
 	/*
@@ -413,33 +412,33 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new->thread.fpu.counter > 5);
+		      (use_eager_fpu() || new_fpu->counter > 5);
 	if (old_fpu->has_fpu) {
-		if (!fpu_save_init(&old->thread.fpu))
-			old->thread.fpu.last_cpu = -1;
+		if (!fpu_save_init(old_fpu))
+			old_fpu->last_cpu = -1;
 		else
-			old->thread.fpu.last_cpu = cpu;
+			old_fpu->last_cpu = cpu;
 		/* But leave fpu_fpregs_owner_ctx! */
-		old->thread.fpu.has_fpu = 0;
+		old_fpu->has_fpu = 0;
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			__thread_set_has_fpu(new_fpu);
-			prefetch(new->thread.fpu.state);
+			prefetch(new_fpu->state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu.counter = 0;
-		old->thread.fpu.last_cpu = -1;
+		old_fpu->counter = 0;
+		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
-				prefetch(new->thread.fpu.state);
+				prefetch(new_fpu->state);
 			__thread_fpu_begin(new_fpu);
 		}
 	}
@@ -248,7 +248,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
@@ -278,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().