Commit 70ad6368 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "The biggest part is a series of reverts for the macro based GCC
  inlining workarounds. It caused regressions in distro build and other
  kernel tooling environments, and the GCC project was very receptive to
  fixing the underlying inliner weaknesses - so as time ran out we
  decided to do a reasonably straightforward revert of the patches. The
  plan is to rely on the 'asm inline' GCC 9 feature, which might be
  backported to GCC 8 and could thus become reasonably widely available
  on modern distros.

  Other than those reverts, there's misc fixes from all around the
  place.

  I wish our final x86 pull request for v4.20 was smaller..."
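
For reference, a minimal, hedged sketch of the "asm inline" qualifier mentioned above (illustrative only, not part of this pull request): the qualifier tells GCC's inliner to cost an asm statement as if it were tiny, so a bulky-looking asm body no longer scares callers out of being inlined.

/*
 * Sketch of the GCC 9 "asm inline" qualifier; the section-pushing body below
 * looks big to the size heuristics, but "asm inline" makes the inliner treat
 * it as negligible.  The section name is made up for the example.
 */
static inline void example_annotate(void)
{
#if defined(__GNUC__) && __GNUC__ >= 9
	asm inline volatile(".pushsection .discard.example\n\t"
			    ".long 0\n\t"
			    ".popsection");
#else
	asm volatile(".pushsection .discard.example\n\t"
		     ".long 0\n\t"
		     ".popsection");
#endif
}
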

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs"
  Revert "x86/objtool: Use asm macros to work around GCC inlining bugs"
  Revert "x86/refcount: Work around GCC inlining bug"
  Revert "x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs"
  Revert "x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs"
  Revert "x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops"
  Revert "x86/extable: Macrofy inline assembly code to work around GCC inlining bugs"
  Revert "x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs"
  Revert "x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs"
  x86/mtrr: Don't copy uninitialized gentry fields back to userspace
  x86/fsgsbase/64: Fix the base write helper functions
  x86/mm/cpa: Fix cpa_flush_array() TLB invalidation
  x86/vdso: Pass --eh-frame-hdr to the linker
  x86/mm: Fix decoy address handling vs 32-bit builds
  x86/intel_rdt: Ensure a CPU remains online for the region's pseudo-locking sequence
  x86/dump_pagetables: Fix LDT remap address marker
  x86/mm: Fix guard hole handling
parents 96d6ee7d 6ac38934
@@ -1076,7 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
 # version.h and scripts_basic is processed / created.
 # Listed in dependency order
-PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
 # prepare3 is used to check if we are building in a separate output directory, 
 # and if so do:
@@ -1099,9 +1099,7 @@ prepare2: prepare3 outputmakefile asm-generic
 prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
 	$(cmd_crmodverdir)
-macroprepare: prepare1 archmacros
-archprepare: archheaders archscripts macroprepare scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic
 prepare0: archprepare gcc-plugins
 	$(Q)$(MAKE) $(build)=.
@@ -1177,9 +1175,6 @@ archheaders:
 PHONY += archscripts
 archscripts:
-PHONY += archmacros
-archmacros:
 PHONY += __headers
 __headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
 	$(Q)$(MAKE) $(build)=scripts build_unifdef
...
@@ -232,13 +232,6 @@ archscripts: scripts_basic
 archheaders:
 	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
-archmacros:
-	$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
-export ASM_MACRO_FLAGS
-KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
 ###
 # Kernel objects
...
@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-	STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
 #endif
 	call enter_from_user_mode
 .Lafter_call_\@:
...
@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
 	sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
-	$(call ld-option, --build-id) -Bsymbolic
+	$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+	-Bsymbolic
 GCOV_PROFILE := n
 #
...
@@ -7,24 +7,16 @@
 #include <asm/asm.h>
 #ifdef CONFIG_SMP
-.macro LOCK_PREFIX_HERE
+.macro LOCK_PREFIX
+672:	lock
 	.pushsection .smp_locks,"a"
 	.balign 4
-	.long 671f - .	# offset
+	.long 672b - .
 	.popsection
-671:
-.endm
-.macro LOCK_PREFIX insn:vararg
-	LOCK_PREFIX_HERE
-	lock \insn
-.endm
+.endm
 #else
-.macro LOCK_PREFIX_HERE
+.macro LOCK_PREFIX
 .endm
-.macro LOCK_PREFIX insn:vararg
-.endm
 #endif
 /*
...
@@ -31,8 +31,15 @@
  */
 #ifdef CONFIG_SMP
-#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
-#define LOCK_PREFIX "LOCK_PREFIX "
+#define LOCK_PREFIX_HERE \
+	".pushsection .smp_locks,\"a\"\n" \
+	".balign 4\n" \
+	".long 671f - .\n" /* offset */ \
+	".popsection\n" \
+	"671:"
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
 #else /* ! CONFIG_SMP */
 #define LOCK_PREFIX_HERE ""
 #define LOCK_PREFIX ""
...
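
To show how the restored string form is consumed from C, here is a hedged, self-contained sketch (the .smp_locks bookkeeping that the real LOCK_PREFIX_HERE performs is omitted so the example builds outside the kernel; the helper name is made up):

/*
 * Simplified stand-in for the kernel's LOCK_PREFIX shown above; the
 * .smp_locks section bookkeeping is dropped so this compiles standalone.
 */
#define LOCK_PREFIX "\n\tlock; "

/* Roughly what arch/x86 atomic_inc() boils down to with the string form. */
static inline void example_atomic_inc(int *counter)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (*counter));
}

int main(void)
{
	int v = 0;

	example_atomic_inc(&v);
	return v == 1 ? 0 : 1;
}
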
@@ -120,25 +120,12 @@
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE_HANDLE(from, to, handler) \
-	ASM_EXTABLE_HANDLE from to handler
-.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
-	.pushsection "__ex_table","a"
-	.balign 4
-	.long (\from) - .
-	.long (\to) - .
-	.long (\handler) - .
+	.pushsection "__ex_table","a" ; \
+	.balign 4 ; \
+	.long (from) - . ; \
+	.long (to) - . ; \
+	.long (handler) - . ; \
 	.popsection
-.endm
-#else /* __ASSEMBLY__ */
-# define _ASM_EXTABLE_HANDLE(from, to, handler) \
-	"ASM_EXTABLE_HANDLE from=" #from " to=" #to \
-	" handler=\"" #handler "\"\n\t"
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-#endif /* __ASSEMBLY__ */
 # define _ASM_EXTABLE(from, to) \
 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
 	_ASM_PTR (entry); \
 	.popsection
-#ifdef __ASSEMBLY__
 .macro ALIGN_DESTINATION
 	/* check for bad alignment of destination */
 	movl %edi,%ecx
@@ -185,7 +171,34 @@
 	_ASM_EXTABLE_UA(100b, 103b)
 	_ASM_EXTABLE_UA(101b, 103b)
 	.endm
-#endif /* __ASSEMBLY__ */
+#else
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+	" .pushsection \"__ex_table\",\"a\"\n" \
+	" .balign 4\n" \
+	" .long (" #from ") - .\n" \
+	" .long (" #to ") - .\n" \
+	" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
+	" .popsection\n"
+# define _ASM_EXTABLE(from, to) \
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+# define _ASM_EXTABLE_UA(from, to) \
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+# define _ASM_EXTABLE_FAULT(from, to) \
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+# define _ASM_EXTABLE_EX(from, to) \
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+# define _ASM_EXTABLE_REFCOUNT(from, to) \
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+#endif
 #ifndef __ASSEMBLY__
 /*
...
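
A hedged sketch of how the restored C-string exception-table helpers are typically used (kernel context assumed; modeled loosely on the kernel's __get_user_asm pattern, names are illustrative): a user access that may fault at label 1 gets a fixup at label 3, reached through the __ex_table entry emitted by _ASM_EXTABLE_UA().

/* Kernel-context sketch, illustrative only: assumes <asm/asm.h>, <linux/errno.h>. */
static inline int example_get_user_u32(u32 *dst, const u32 __user *src)
{
	int err = 0;
	u32 val = 0;

	asm volatile("1:	movl %2, %1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3, %0\n"		/* report -EFAULT */
		     "	xorl %1, %1\n"			/* and zero the value */
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE_UA(1b, 3b)
		     : "+r" (err), "=r" (val)
		     : "m" (*src), "i" (-EFAULT));

	*dst = val;
	return err;
}
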
@@ -4,8 +4,6 @@
 #include <linux/stringify.h>
-#ifndef __ASSEMBLY__
 /*
  * Despite that some emulators terminate on UD2, we use it for WARN().
  *
@@ -22,15 +20,53 @@
 #define LEN_UD2 2
+#ifdef CONFIG_GENERIC_BUG
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val) ".long " __stringify(val)
+#else
+# define __BUG_REL(val) ".long " __stringify(val) " - 2b"
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _BUG_FLAGS(ins, flags) \
 do { \
-	asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \
-		     "flags=%c2 size=%c3" \
+	asm volatile("1:\t" ins "\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
+		     "\t.word %c1" "\t# bug_entry::line\n" \
+		     "\t.word %c2" "\t# bug_entry::flags\n" \
+		     "\t.org 2b+%c3\n" \
+		     ".popsection" \
 		     : : "i" (__FILE__), "i" (__LINE__), \
 			 "i" (flags), \
 			 "i" (sizeof(struct bug_entry))); \
 } while (0)
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+#define _BUG_FLAGS(ins, flags) \
+do { \
+	asm volatile("1:\t" ins "\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+		     "\t.word %c0" "\t# bug_entry::flags\n" \
+		     "\t.org 2b+%c1\n" \
+		     ".popsection" \
+		     : : "i" (flags), \
+			 "i" (sizeof(struct bug_entry))); \
+} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+#else
+#define _BUG_FLAGS(ins, flags) asm volatile(ins)
+#endif /* CONFIG_GENERIC_BUG */
 #define HAVE_ARCH_BUG
 #define BUG() \
 do { \
@@ -46,54 +82,4 @@ do { \
 #include <asm-generic/bug.h>
-#else /* __ASSEMBLY__ */
-#ifdef CONFIG_GENERIC_BUG
-#ifdef CONFIG_X86_32
-.macro __BUG_REL val:req
-	.long \val
-.endm
-#else
-.macro __BUG_REL val:req
-	.long \val - 2b
-.endm
-#endif
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:	\ins
-	.pushsection __bug_table,"aw"
-2:	__BUG_REL val=1b	# bug_entry::bug_addr
-	__BUG_REL val=\file	# bug_entry::file
-	.word \line		# bug_entry::line
-	.word \flags		# bug_entry::flags
-	.org 2b+\size
-	.popsection
-.endm
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:	\ins
-	.pushsection __bug_table,"aw"
-2:	__BUG_REL val=1b	# bug_entry::bug_addr
-	.word \flags		# bug_entry::flags
-	.org 2b+\size
-	.popsection
-.endm
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-#else /* CONFIG_GENERIC_BUG */
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-	\ins
-.endm
-#endif /* CONFIG_GENERIC_BUG */
-#endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_BUG_H */
@@ -2,10 +2,10 @@
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 #include <asm/processor.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 #include <asm/asm.h>
 #include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-	asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
-			  "cap_byte=\"%[cap_byte]\" "
-			  "feature=%P[feature] t_yes=%l[t_yes] "
-			  "t_no=%l[t_no] always=%P[always]"
+	asm_volatile_goto("1: jmp 6f\n"
+			  "2:\n"
+			  ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+			  "((5f-4f) - (2b-1b)),0x90\n"
+			  "3:\n"
+			  ".section .altinstructions,\"a\"\n"
+			  " .long 1b - .\n"		/* src offset */
+			  " .long 4f - .\n"		/* repl offset */
+			  " .word %P[always]\n"		/* always replace */
+			  " .byte 3b - 1b\n"		/* src len */
+			  " .byte 5f - 4f\n"		/* repl len */
+			  " .byte 3b - 2b\n"		/* pad len */
+			  ".previous\n"
+			  ".section .altinstr_replacement,\"ax\"\n"
+			  "4: jmp %l[t_no]\n"
+			  "5:\n"
+			  ".previous\n"
+			  ".section .altinstructions,\"a\"\n"
+			  " .long 1b - .\n"		/* src offset */
+			  " .long 0\n"			/* no replacement */
+			  " .word %P[feature]\n"	/* feature bit */
+			  " .byte 3b - 1b\n"		/* src len */
+			  " .byte 0\n"			/* repl len */
+			  " .byte 0\n"			/* pad len */
+			  ".previous\n"
+			  ".section .altinstr_aux,\"ax\"\n"
+			  "6:\n"
+			  " testb %[bitnum],%[cap_byte]\n"
+			  " jnz %l[t_yes]\n"
+			  " jmp %l[t_no]\n"
+			  ".previous\n"
 			  : : [feature] "i" (bit),
 			      [always] "i" (X86_FEATURE_ALWAYS),
 			      [bitnum] "i" (1 << (bit & 7)),
@@ -199,44 +226,5 @@ static __always_inline __pure bool _static_cpu_has(u16 bit)
 #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
 			    boot_cpu_data.x86_model
-#else /* __ASSEMBLY__ */
-.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
-1:
-	jmp 6f
-2:
-	.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
-3:
-	.section .altinstructions,"a"
-	.long 1b - .		/* src offset */
-	.long 4f - .		/* repl offset */
-	.word \always		/* always replace */
-	.byte 3b - 1b		/* src len */
-	.byte 5f - 4f		/* repl len */
-	.byte 3b - 2b		/* pad len */
-	.previous
-	.section .altinstr_replacement,"ax"
-4:
-	jmp \t_no
-5:
-	.previous
-	.section .altinstructions,"a"
-	.long 1b - .		/* src offset */
-	.long 0			/* no replacement */
-	.word \feature		/* feature bit */
-	.byte 3b - 1b		/* src len */
-	.byte 0			/* repl len */
-	.byte 0			/* pad len */
-	.previous
-	.section .altinstr_aux,"ax"
-6:
-	testb \bitnum,\cap_byte
-	jnz \t_yes
-	jmp \t_no
-	.previous
-.endm
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 #endif /* _ASM_X86_CPUFEATURE_H */
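
Callers normally reach the restored _static_cpu_has() machinery through static_cpu_has(); a hedged, kernel-context sketch (the feature bit and function name are only examples):

/* Kernel-context sketch; X86_FEATURE_XMM2 is just an example feature bit. */
static bool example_have_sfence(void)
{
	/*
	 * Emits the self-modifying jump shown above; alternatives patching
	 * resolves it into a direct "present" or "absent" path at boot.
	 */
	return static_cpu_has(X86_FEATURE_XMM2);
}
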
@@ -16,8 +16,8 @@
  */
 extern unsigned long x86_fsbase_read_task(struct task_struct *task);
 extern unsigned long x86_gsbase_read_task(struct task_struct *task);
-extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
-extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
 /* Helper functions for reading/writing FS/GS base */
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
 	return gsbase;
 }
-extern void x86_fsbase_write_cpu(unsigned long fsbase);
-extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+	wrmsrl(MSR_FS_BASE, fsbase);
+}
+static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
 #endif /* CONFIG_X86_64 */
...
@@ -2,6 +2,19 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction.  Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
 #define JUMP_LABEL_NOP_SIZE 5
 #ifdef CONFIG_X86_64
@@ -20,9 +33,15 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
+	asm_volatile_goto("1:"
+			  ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+			  ".pushsection __jump_table, \"aw\" \n\t"
+			  _ASM_ALIGN "\n\t"
+			  ".long 1b - ., %l[l_yes] - . \n\t"
+			  _ASM_PTR "%c0 + %c1 - .\n\t"
+			  ".popsection \n\t"
 			  : : "i" (key), "i" (branch) : : l_yes);
 	return false;
 l_yes:
 	return true;
@@ -30,8 +49,14 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
+	asm_volatile_goto("1:"
+			  ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+			  "2:\n\t"
+			  ".pushsection __jump_table, \"aw\" \n\t"
+			  _ASM_ALIGN "\n\t"
+			  ".long 1b - ., %l[l_yes] - . \n\t"
+			  _ASM_PTR "%c0 + %c1 - .\n\t"
+			  ".popsection \n\t"
 			  : : "i" (key), "i" (branch) : : l_yes);
 	return false;
@@ -41,26 +66,37 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 #else /* __ASSEMBLY__ */
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	/* Equivalent to "jmp.d32 \target" */
+	.byte 0xe9
+	.long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.else
 	.byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_nop_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key - .
 	.popsection
 .endm
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	.byte STATIC_KEY_INIT_NOP
+	.else
+	/* Equivalent to "jmp.d32 \target" */
 	.byte 0xe9
-	.long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+	.long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_jmp_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key + 1 - .
 	.popsection
 .endm
...
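
From C, these jump-label primitives are normally reached through the static-key API rather than by calling arch_static_branch() directly; a hedged, kernel-context sketch (key and function names are made up):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Kernel-context sketch: a default-off key guarding a rarely used path. */
static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static void example_hot_path(void)
{
	/* Compiled as a NOP until the key is enabled, then patched to a jump. */
	if (static_branch_unlikely(&example_feature_key))
		pr_info("example feature active\n");
}

static void example_enable_feature(void)
{
	static_branch_enable(&example_feature_key);
}
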
@@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops;
 #define paravirt_clobber(clobber) \
 	[paravirt_clobber] "i" (clobber)
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+	"771:\n\t" insn_string "\n" "772:\n" \
+	".pushsection .parainstructions,\"a\"\n" \
+	_ASM_ALIGN "\n" \
+	_ASM_PTR " 771b\n" \
+	" .byte " type "\n" \
+	" .byte 772b-771b\n" \
+	" .short " clobber "\n" \
+	".popsection\n"
 /* Generate patchable code, with the default asm parameters. */
-#define paravirt_call \
-	"PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
-	" clobber=\"%c[paravirt_clobber]\"" \
-	" pv_opptr=\"%c[paravirt_opptr]\";"
+#define paravirt_alt(insn_string) \
+	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -372,6 +384,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
 int paravirt_disable_iospace(void);
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL \
+	ANNOTATE_RETPOLINE_SAFE \
+	"call *%c[paravirt_opptr];"
 /*
  * These macros are intended to wrap calls through one of the paravirt
  * ops structs, so that they can be later identified and patched at
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void);
 		/* since this condition will never hold */ \
 		if (sizeof(rettype) > sizeof(unsigned long)) { \
 			asm volatile(pre \
-				     paravirt_call \
+				     paravirt_alt(PARAVIRT_CALL) \
 				     post \
 				     : call_clbr, ASM_CALL_CONSTRAINT \
 				     : paravirt_type(op), \
@@ -519,7 +541,7 @@ int paravirt_disable_iospace(void);
 			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
 		} else { \
 			asm volatile(pre \
-				     paravirt_call \
+				     paravirt_alt(PARAVIRT_CALL) \
 				     post \
 				     : call_clbr, ASM_CALL_CONSTRAINT \
 				     : paravirt_type(op), \
@@ -546,7 +568,7 @@ int paravirt_disable_iospace(void);
 		PVOP_VCALL_ARGS; \
 		PVOP_TEST_NULL(op); \
 		asm volatile(pre \
-			     paravirt_call \
+			     paravirt_alt(PARAVIRT_CALL) \
 			     post \
 			     : call_clbr, ASM_CALL_CONSTRAINT \
 			     : paravirt_type(op), \
@@ -664,26 +686,6 @@ struct paravirt_patch_site {
 extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
-#else /* __ASSEMBLY__ */
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
-771:	ANNOTATE_RETPOLINE_SAFE
-	call *\pv_opptr
-772:	.pushsection .parainstructions,"a"
-	_ASM_ALIGN
-	_ASM_PTR 771b
-	.byte \type
-	.byte 772b-771b
-	.short \clobber
-	.popsection
-.endm
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_TYPES_H */
@@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM (1UL << MAX_PHYSMEM_BITS)
+#define GUARD_HOLE_PGD_ENTRY	-256UL
+#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
 #define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
...
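
A small, standalone worked check of the GUARD_HOLE_* values added above, under the assumption of 4-level paging (PGDIR_SHIFT == 39) and a 64-bit build; it reproduces the 8 TiB hole at ffff800000000000 - ffff87ffffffffff that the old dump_pagetables comment (removed further below) spelled out:

#include <stdio.h>

/* Assumes 4-level paging: PGDIR_SHIFT is 39, so one PGD entry maps 512 GiB. */
#define PGDIR_SHIFT		39
#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

int main(void)
{
	/* Prints base 0xffff800000000000, end 0xffff880000000000, size 8 TiB. */
	printf("base %#lx end %#lx size %lu TiB\n",
	       GUARD_HOLE_BASE_ADDR, GUARD_HOLE_END_ADDR,
	       GUARD_HOLE_SIZE >> 40);
	return 0;
}
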
@@ -4,41 +4,6 @@
  * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
  * PaX/grsecurity.
  */
-#ifdef __ASSEMBLY__
-#include <asm/asm.h>
-#include <asm/bug.h>
-.macro REFCOUNT_EXCEPTION counter:req
-	.pushsection .text..refcount
-111:	lea \counter, %_ASM_CX
-112:	ud2
-	ASM_UNREACHABLE
-	.popsection
-113:	_ASM_EXTABLE_REFCOUNT(112b, 113b)
-.endm
-/* Trigger refcount exception if refcount result is negative. */
-.macro REFCOUNT_CHECK_LT_ZERO counter:req
-	js 111f
-	REFCOUNT_EXCEPTION counter="\counter"
-.endm
-/* Trigger refcount exception if refcount result is zero or negative. */
-.macro REFCOUNT_CHECK_LE_ZERO counter:req
-	jz 111f
-	REFCOUNT_CHECK_LT_ZERO counter="\counter"
-.endm
-/* Trigger refcount exception unconditionally. */
-.macro REFCOUNT_ERROR counter:req
-	jmp 111f
-	REFCOUNT_EXCEPTION counter="\counter"
-.endm
-#else /* __ASSEMBLY__ */
 #include <linux/refcount.h>
 #include <asm/bug.h>
@@ -50,12 +15,35 @@
  * central refcount exception. The fixup address for the exception points
  * back to the regular execution flow in .text.
  */
+#define _REFCOUNT_EXCEPTION \
+	".pushsection .text..refcount\n" \
+	"111:\tlea %[var], %%" _ASM_CX "\n" \
+	"112:\t" ASM_UD2 "\n" \
+	ASM_UNREACHABLE \
+	".popsection\n" \
+	"113:\n" \
+	_ASM_EXTABLE_REFCOUNT(112b, 113b)
+/* Trigger refcount exception if refcount result is negative. */
+#define REFCOUNT_CHECK_LT_ZERO \
+	"js 111f\n\t" \
+	_REFCOUNT_EXCEPTION
+/* Trigger refcount exception if refcount result is zero or negative. */
+#define REFCOUNT_CHECK_LE_ZERO \
+	"jz 111f\n\t" \
+	REFCOUNT_CHECK_LT_ZERO
+/* Trigger refcount exception unconditionally. */
+#define REFCOUNT_ERROR \
+	"jmp 111f\n\t" \
+	_REFCOUNT_EXCEPTION
 static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
-		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LT_ZERO
+		: [var] "+m" (r->refs.counter)
 		: "ir" (i)
 		: "cc", "cx");
 }
@@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 static __always_inline void refcount_inc(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "incl %0\n\t"
-		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LT_ZERO
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
 static __always_inline void refcount_dec(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "decl %0\n\t"
-		"REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LE_ZERO
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
 	return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
-					 "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+					 REFCOUNT_CHECK_LT_ZERO,
 					 r->refs.counter, e, "er", i, "cx");
 }
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
 	return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
-					"REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+					REFCOUNT_CHECK_LT_ZERO,
 					r->refs.counter, e, "cx");
 }
@@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		/* Did we try to increment from/to an undesirable state? */
 		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
-			asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
-				     : : [counter] "m" (r->refs.counter)
+			asm volatile(REFCOUNT_ERROR
+				     : : [var] "m" (r->refs.counter)
 				     : "cc", "cx");
 			break;
 		}
@@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 	return refcount_add_not_zero(1, r);
 }
-#endif /* __ASSEMBLY__ */
 #endif
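
A hedged usage sketch (kernel context assumed) of the refcount_t API whose x86 fast paths are restored above; the saturation and warning behavior lives in the REFCOUNT_CHECK_* asm, so callers just use the generic helpers:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Kernel-context sketch of a reference-counted object; names are made up. */
struct example_obj {
	refcount_t refs;
	int payload;
};

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refs, 1);	/* caller holds the first reference */
	return obj;
}

static struct example_obj *example_obj_get(struct example_obj *obj)
{
	refcount_inc(&obj->refs);		/* LOCK incl plus overflow check */
	return obj;
}

static void example_obj_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs))	/* LOCK decl plus underflow check */
		kfree(obj);
}
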
@@ -23,6 +23,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		return -EINVAL;
 	buf[nbytes - 1] = '\0';
+	cpus_read_lock();
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (!rdtgrp) {
 		rdtgroup_kn_unlock(of->kn);
+		cpus_read_unlock();
 		return -ENOENT;
 	}
 	rdt_last_cmd_clear();
@@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 out:
 	rdtgroup_kn_unlock(of->kn);
+	cpus_read_unlock();
 	return ret ?: nbytes;
 }
...
@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
+	memset(&gentry, 0, sizeof(gentry));
 	switch (cmd) {
 	case MTRRIOC_ADD_ENTRY:
 	case MTRRIOC_SET_ENTRY:
...
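
The one-line mtrr fix above follows a standard pattern: zero the whole on-stack struct before filling selected fields, so padding and any unwritten members never leak kernel stack contents to userspace. A hedged, generic sketch of the pattern (kernel context assumed; the struct layout and function name are illustrative only):

/* Kernel-context sketch; assumes <linux/string.h>, <linux/uaccess.h>, <linux/errno.h>. */
struct example_gentry {
	unsigned long base;
	unsigned int size;
	unsigned int regnum;
	unsigned int type;	/* the compiler may add padding around these */
};

static long example_ioctl_get(void __user *arg, unsigned int regnum)
{
	struct example_gentry gentry;

	memset(&gentry, 0, sizeof(gentry));	/* no uninitialized bytes escape */

	gentry.regnum = regnum;
	/* ...fill in base/size/type only for valid entries... */

	if (copy_to_user(arg, &gentry, sizeof(gentry)))
		return -EFAULT;
	return 0;
}
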
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file includes headers whose assembly part includes macros which are
- * commonly used. The macros are precompiled into assmebly file which is later
- * assembled together with each compiled file.
- */
-#include <linux/compiler.h>
-#include <asm/refcount.h>
-#include <asm/alternative-asm.h>
-#include <asm/bug.h>
-#include <asm/paravirt.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/jump_label.h>
@@ -339,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
 	return base;
 }
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
-	/*
-	 * Set the selector to 0 as a notion, that the segment base is
-	 * overwritten, which will be checked for skipping the segment load
-	 * during context switch.
-	 */
-	loadseg(FS, 0);
-	wrmsrl(MSR_FS_BASE, fsbase);
-}
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
-	/* Set the selector to 0 for the same reason as %fs above. */
-	loadseg(GS, 0);
-	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
 unsigned long x86_fsbase_read_task(struct task_struct *task)
 {
 	unsigned long fsbase;
@@ -385,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
 	return gsbase;
 }
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
 {
-	/*
-	 * Not strictly needed for %fs, but do it for symmetry
-	 * with %gs
-	 */
-	if (unlikely(fsbase >= TASK_SIZE_MAX))
-		return -EPERM;
-	preempt_disable();
+	WARN_ON_ONCE(task == current);
 	task->thread.fsbase = fsbase;
-	if (task == current)
-		x86_fsbase_write_cpu(fsbase);
-	task->thread.fsindex = 0;
-	preempt_enable();
-	return 0;
 }
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
 {
-	if (unlikely(gsbase >= TASK_SIZE_MAX))
-		return -EPERM;
-	preempt_disable();
+	WARN_ON_ONCE(task == current);
 	task->thread.gsbase = gsbase;
-	if (task == current)
-		x86_gsbase_write_cpu_inactive(gsbase);
-	task->thread.gsindex = 0;
-	preempt_enable();
-	return 0;
 }
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -754,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 	switch (option) {
 	case ARCH_SET_GS: {
-		ret = x86_gsbase_write_task(task, arg2);
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+		preempt_disable();
+		/*
+		 * ARCH_SET_GS has always overwritten the index
+		 * and the base. Zero is the most sensible value
+		 * to put in the index, and is the only value that
+		 * makes any sense if FSGSBASE is unavailable.
+		 */
+		if (task == current) {
+			loadseg(GS, 0);
+			x86_gsbase_write_cpu_inactive(arg2);
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.gsbase.
+			 */
+			task->thread.gsbase = arg2;
+		} else {
+			task->thread.gsindex = 0;
+			x86_gsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_SET_FS: {
-		ret = x86_fsbase_write_task(task, arg2);
+		/*
+		 * Not strictly needed for %fs, but do it for symmetry
+		 * with %gs
+		 */
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+		preempt_disable();
+		/*
+		 * Set the selector to 0 for the same reason
+		 * as %gs above.
+		 */
+		if (task == current) {
+			loadseg(FS, 0);
+			x86_fsbase_write_cpu(arg2);
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.fsbase.
+			 */
+			task->thread.fsbase = arg2;
+		} else {
+			task->thread.fsindex = 0;
+			x86_fsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_GET_FS: {
...
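
The do_arch_prctl_64() paths reshuffled above are what userspace reaches through arch_prctl(2); a hedged, standalone x86-64 sketch of driving them from userspace (error handling kept minimal):

/* Standalone x86-64 userspace sketch: read the FS base via ARCH_GET_FS. */
#include <asm/prctl.h>		/* ARCH_GET_FS, ARCH_SET_GS, ... */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long fsbase = 0;

	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0) {
		perror("arch_prctl(ARCH_GET_FS)");
		return 1;
	}
	printf("FS base: %#lx\n", fsbase);	/* typically the TLS block set up by libc */
	return 0;
}
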
@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		/*
-		 * When changing the FS base, use the same
-		 * mechanism as for do_arch_prctl_64().
+		 * When changing the FS base, use do_arch_prctl_64()
+		 * to set the index to zero and to set the base
+		 * as requested.
 		 */
 		if (child->thread.fsbase != value)
-			return x86_fsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
 		/*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		if (child->thread.gsbase != value)
-			return x86_gsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_GS, value);
 		return 0;
 #endif
 	}
...
@@ -55,10 +55,10 @@ struct addr_marker {
 enum address_markers_idx {
 	USER_SPACE_NR = 0,
 	KERNEL_SPACE_NR,
-	LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	LDT_NR,
 #endif
+	LOW_KERNEL_NR,
 	VMALLOC_START_NR,
 	VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
@@ -66,9 +66,6 @@ enum address_markers_idx {
 	KASAN_SHADOW_END_NR,
 #endif
 	CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
-	LDT_NR,
-#endif
 #ifdef CONFIG_X86_ESPFIX64
 	ESPFIX_START_NR,
 #endif
@@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx)
 {
 #ifdef CONFIG_X86_64
 	/*
-	 * ffff800000000000 - ffff87ffffffffff is reserved for
-	 * the hypervisor.
+	 * A hole in the beginning of kernel address space reserved
+	 * for a hypervisor.
 	 */
-	return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-	       (idx < pgd_index(__PAGE_OFFSET));
+	return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+	       (idx < pgd_index(GUARD_HOLE_END_ADDR));
 #else
 	return false;
 #endif
...
@@ -285,20 +285,16 @@ static void cpa_flush_all(unsigned long cache)
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
-static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
+static bool __inv_flush_all(int cache)
 {
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-	WARN_ON(PAGE_ALIGN(start) != start);
 	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		cpa_flush_all(cache);
 		return true;
 	}
-	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-	return !cache;
+	return false;
 }
 static void cpa_flush_range(unsigned long start, int numpages, int cache)
@@ -306,7 +302,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	unsigned int i, level;
 	unsigned long addr;
-	if (__cpa_flush_range(start, numpages, cache))
+	WARN_ON(PAGE_ALIGN(start) != start);
+	if (__inv_flush_all(cache))
+		return;
+	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
+	if (!cache)
 		return;
 	/*
@@ -332,7 +335,12 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
 {
 	unsigned int i, level;
-	if (__cpa_flush_range(baddr, numpages, cache))
+	if (__inv_flush_all(cache))
+		return;
+	flush_tlb_all();
+	if (!cache)
 		return;
 	/*
...
@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
 	 * for a "decoy" virtual address (bit 63 clear) passed to
 	 * set_memory_X(). __pa() on a "decoy" address results in a
 	 * physical address with bit 63 set.
+	 *
+	 * Decoy addresses are not present for 32-bit builds, see
+	 * set_mce_nospec().
 	 */
+	if (IS_ENABLED(CONFIG_X86_64))
 		return address & __PHYSICAL_MASK;
+	return address;
 }
 /*
@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 	start = sanitize_phys(start);
 	end = sanitize_phys(end);
-	BUG_ON(start >= end); /* end is exclusive */
+	if (start >= end) {
+		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+		     start, end - 1, cattr_name(req_type));
+		return -EINVAL;
+	}
 	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */
...
@@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			  unsigned long limit)
 {
 	int i, nr, flush = 0;
-	unsigned hole_low, hole_high;
+	unsigned hole_low = 0, hole_high = 0;
 	/* The limit is the last byte to be touched */
 	limit--;
 	BUG_ON(limit >= FIXADDR_TOP);
+#ifdef CONFIG_X86_64
 	/*
 	 * 64-bit has a great big hole in the middle of the address
-	 * space, which contains the Xen mappings. On 32-bit these
-	 * will end up making a zero-sized hole and so is a no-op.
+	 * space, which contains the Xen mappings.
 	 */
-	hole_low = pgd_index(USER_LIMIT);
-	hole_high = pgd_index(PAGE_OFFSET);
+	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+	hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 	nr = pgd_index(limit) + 1;
 	for (i = 0; i < nr; i++) {
...
@@ -17,8 +17,10 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
-struct bug_entry {
+#ifdef CONFIG_BUG
 #ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long bug_addr;
 #else
@@ -33,10 +35,8 @@ struct bug_entry {
 	unsigned short line;
 #endif
 	unsigned short flags;
-#endif /* CONFIG_GENERIC_BUG */
 };
+#endif /* CONFIG_GENERIC_BUG */
-#ifdef CONFIG_BUG
 /*
  * Don't use BUG() or BUG_ON() unless there's really no way out; one
...
@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({ \
-	asm volatile("ANNOTATE_REACHABLE counter=%c0" \
-		     : : "i" (__COUNTER__)); \
+	asm volatile("%c0:\n\t" \
+		     ".pushsection .discard.reachable\n\t" \
+		     ".long %c0b - .\n\t" \
+		     ".popsection\n\t" : : "i" (__COUNTER__)); \
 })
 #define annotate_unreachable() ({ \
-	asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
-		     : : "i" (__COUNTER__)); \
+	asm volatile("%c0:\n\t" \
+		     ".pushsection .discard.unreachable\n\t" \
+		     ".long %c0b - .\n\t" \
+		     ".popsection\n\t" : : "i" (__COUNTER__)); \
 })
+#define ASM_UNREACHABLE \
+	"999:\n\t" \
+	".pushsection .discard.unreachable\n\t" \
+	".long 999b - .\n\t" \
+	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
-#else /* __ASSEMBLY__ */
-#ifdef __KERNEL__
-#ifndef LINKER_SCRIPT
-#ifdef CONFIG_STACK_VALIDATION
-.macro ANNOTATE_UNREACHABLE counter:req
-\counter:
-	.pushsection .discard.unreachable
-	.long \counter\()b -.
-	.popsection
-.endm
-.macro ANNOTATE_REACHABLE counter:req
-\counter:
-	.pushsection .discard.reachable
-	.long \counter\()b -.
-	.popsection
-.endm
-.macro ASM_UNREACHABLE
-999:
-	.pushsection .discard.unreachable
-	.long 999b - .
-	.popsection
-.endm
-#else /* CONFIG_STACK_VALIDATION */
-.macro ANNOTATE_UNREACHABLE counter:req
-.endm
-.macro ANNOTATE_REACHABLE counter:req
-.endm
-.macro ASM_UNREACHABLE
-.endm
-#endif /* CONFIG_STACK_VALIDATION */
-#endif /* LINKER_SCRIPT */
-#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 /* Compile time object size, -1 for unknown */
...
@@ -115,9 +115,7 @@ __cc-option = $(call try-run,\
 # Do not attempt to build with gcc plugins during cc-option tests.
 # (And this uses delayed resolution so the flags will be up to date.)
-# In addition, do not include the asm macros which are built later.
-CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
-CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
...
@@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y
 hostprogs-y := modpost mk_elfconfig
 always := $(hostprogs-y) empty.o
-CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
 modpost-objs := modpost.o file2alias.o sumversion.o
 devicetable-offsets-file := devicetable-offsets.h
...