Commit 6dd677a0 authored by Sebastian Andrzej Siewior's avatar Sebastian Andrzej Siewior Committed by Borislav Petkov

x86/fpu: Remove fpu__restore()

There are no users of fpu__restore() so it is time to remove it. The
comment regarding fpu__restore() and TS bit is stale since commit

  b3b0870e ("i387: do not preload FPU state at task switch time")

and has had no meaning since then.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: Babu Moger <Babu.Moger@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: Dmitry Safonov <dima@arista.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: linux-doc@vger.kernel.org
Cc: Nicolai Stange <nstange@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-3-bigeasy@linutronix.de
parent 39ea9baf
...@@ -52,7 +52,6 @@ preemption must be disabled around such regions. ...@@ -52,7 +52,6 @@ preemption must be disabled around such regions.
Note, some FPU functions are already explicitly preempt safe. For example, Note, some FPU functions are already explicitly preempt safe. For example,
kernel_fpu_begin and kernel_fpu_end will disable and enable preemption. kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
However, fpu__restore() must be called with preemption disabled.
RULE #3: Lock acquire and release must be performed by same task RULE #3: Lock acquire and release must be performed by same task
......
...@@ -28,7 +28,6 @@ extern void fpu__initialize(struct fpu *fpu); ...@@ -28,7 +28,6 @@ extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu); extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu); extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu); extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame); extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu); extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu); extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
......
...@@ -303,30 +303,6 @@ void fpu__prepare_write(struct fpu *fpu) ...@@ -303,30 +303,6 @@ void fpu__prepare_write(struct fpu *fpu)
} }
} }
/*
 * fpu__restore() - load a task's saved FPU state into the hardware registers.
 *
 * Copies the register image in @fpu->state to the live FPU registers
 * (copy_kernel_to_fpregs()) and marks the fpregs as active for this task
 * (fpregs_activate()), so FPU instructions can be used afterwards.
 *
 * NOTE(review): the original comment here claimed this "must be called
 * with kernel preemption disabled ... as in do_device_not_available()";
 * per the commit message that claim has been stale since
 * b3b0870e ("i387: do not preload FPU state at task switch time").
 */
void fpu__restore(struct fpu *fpu)
{
/* Make sure @fpu has a valid fpstate before loading it. */
fpu__initialize(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
/* /*
* Drops current FPU state: deactivates the fpregs and * Drops current FPU state: deactivates the fpregs and
* the fpstate. NOTE: it still leaves previous contents * the fpstate. NOTE: it still leaves previous contents
......
...@@ -267,9 +267,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -267,9 +267,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* /*
* Leave lazy mode, flushing any hypercalls made here. * Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so * This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be * the GDT and LDT are properly updated.
* done before fpu__restore(), so the TS bit is up
* to date.
*/ */
arch_end_context_switch(next_p); arch_end_context_switch(next_p);
......
...@@ -538,9 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -538,9 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* /*
* Leave lazy mode, flushing any hypercalls made here. This * Leave lazy mode, flushing any hypercalls made here. This
* must be done after loading TLS entries in the GDT but before * must be done after loading TLS entries in the GDT but before
* loading segments that might reference them, and and it must * loading segments that might reference them.
* be done before fpu__restore(), so the TS bit is up to
* date.
*/ */
arch_end_context_switch(next_p); arch_end_context_switch(next_p);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment