Commit d6e867a6 authored by Linus Torvalds

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Misc preparatory changes for an upcoming FPU optimization that will
  delay the loading of FPU registers to return-to-userspace"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Don't export __kernel_fpu_{begin,end}()
  x86/fpu: Update comment for __raw_xsave_addr()
  x86/fpu: Add might_fault() to user_insn()
  x86/pkeys: Make init_pkru_value static
  x86/thread_info: Remove _TIF_ALLWORK_MASK
  x86/process/32: Remove asm/math_emu.h include
  x86/fpu: Use unsigned long long shift in xfeature_uncompacted_offset()
parents db2ab474 12209993
@@ -82,8 +82,7 @@ struct efi_scratch {
 #define arch_efi_call_virt_setup()					\
 ({									\
 	efi_sync_low_kernel_mappings();					\
-	preempt_disable();						\
-	__kernel_fpu_begin();						\
+	kernel_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 									\
 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
@@ -99,8 +98,7 @@ struct efi_scratch {
 	efi_switch_mm(efi_scratch.prev_mm);				\
 									\
 	firmware_restrict_branch_speculation_end();			\
-	__kernel_fpu_end();						\
-	preempt_enable();						\
+	kernel_fpu_end();						\
 })
 
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
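The change is mechanical but worth spelling out: kernel_fpu_begin()/kernel_fpu_end() now bracket preemption themselves, so the open-coded pairs disappear. A minimal sketch of the equivalent pattern, with the EFI call body elided:

/* Before: the caller managed preemption around the low-level helpers. */
preempt_disable();
__kernel_fpu_begin();
/* ... EFI runtime service call ... */
__kernel_fpu_end();
preempt_enable();

/* After: the high-level helpers disable/enable preemption internally. */
kernel_fpu_begin();
/* ... EFI runtime service call ... */
kernel_fpu_end();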
......
@@ -12,17 +12,12 @@
 #define _ASM_X86_FPU_API_H
 
 /*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+ * If you intend to use the FPU in softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
  */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
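To make the documented contract concrete, here is a minimal, hypothetical caller (do_simd_work() is invented for illustration; the API calls are the real ones declared above):

static void do_simd_work(void)
{
	/*
	 * In softirq/interrupt context the FPU may already be in use
	 * by the interrupted task, so ask first.
	 */
	if (!irq_fpu_usable())
		return;		/* fall back to a non-FPU code path */

	kernel_fpu_begin();	/* disables preemption itself */
	/* ... SIMD/FPU instructions may be used here ... */
	kernel_fpu_end();	/* re-enables preemption */
}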
......
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 #define user_insn(insn, output, input...)				\
 ({									\
 	int err;							\
+									\
+	might_fault();							\
+									\
 	asm volatile(ASM_STAC "\n"					\
 		     "1:" #insn "\n\t"					\
 		     "2: " ASM_CLAC "\n"				\
......
@@ -140,14 +140,6 @@ struct thread_info {
 	 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |	\
 	 _TIF_NOHZ)
 
-/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK						\
-	(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING |	\
-	 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU |	\
-	 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE |	\
-	 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT |	\
-	 _TIF_FSCHECK)
-
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW_BASE						\
 	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|	\
......
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
 		__cpu_invalidate_fpregs_state();
 	}
 }
-EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
 
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL(__kernel_fpu_end);
 
 void kernel_fpu_begin(void)
 {
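With the double-underscore variants static and unexported, the EXPORT_SYMBOL'd wrappers are the only entry points left for modules, and they pair the FPU work with preemption control. A sketch of how the wrappers compose with the now-static helpers, consistent with the diff above (simplified, not the verbatim file):

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);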
......
@@ -444,7 +444,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
 	 * format. Checking a supervisor state's uncompacted offset is
 	 * an error.
 	 */
-	if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+	if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
 		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
 		return -1;
 	}
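The BIT_ULL() conversion fixes a shift-width problem, not just style: `1 << xfeature_nr` shifts a 32-bit int, which is undefined behaviour for xfeature_nr >= 32 and can never reach the upper half of the 64-bit supervisor mask. The kernel's macro simply widens the operand before shifting:

/* From include/linux/bits.h (simplified): shift performed in 64 bits. */
#define BIT_ULL(nr)	(1ULL << (nr))

/*
 * XFEATURE_MASK_SUPERVISOR & (1 << nr)      - int shift: UB for nr >= 32,
 *                                             bits 32..63 untestable
 * XFEATURE_MASK_SUPERVISOR & BIT_ULL(nr)    - 64-bit shift: correct for
 *                                             all nr in 0..63
 */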
@@ -808,8 +808,6 @@ void fpu__resume_cpu(void)
  * Given an xstate feature mask, calculate where in the xsave
  * buffer the state is. Callers should ensure that the buffer
  * is valid.
- *
- * Note: does not work for compacted buffers.
  */
 static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 {
......
@@ -44,9 +44,6 @@
 #include <asm/processor.h>
 #include <asm/fpu/internal.h>
 #include <asm/desc.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
 
 #include <linux/err.h>
......
@@ -131,6 +131,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
  * in the process's lifetime will not accidentally get access
  * to data which is pkey-protected later on.
  */
+static
 u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 		      PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
 		      PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
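For context, PKRU packs two bits per protection key, access-disable (AD) and write-disable (WD), so the initializer above starts every key except key 0 access-disabled (upstream the OR chain continues through key 15; the diff view truncates it). A re-derivation of the layout; the values mirror the kernel's pkeys definitions but treat the exact macros as an assumption here:

/* PKRU layout: 2 bits per pkey, key 0 occupies bits 1:0. */
#define PKRU_AD_BIT		0x1	/* access disable */
#define PKRU_WD_BIT		0x2	/* write disable  */
#define PKRU_BITS_PER_PKEY	2
#define PKRU_AD_KEY(pkey)	(PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))

/*
 * PKRU_AD_KEY(1) == 0x4, PKRU_AD_KEY(2) == 0x10, ...
 * OR-ing keys 1..15 gives 0x55555554: all keys but key 0 start
 * access-disabled.
 */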
......