Commit 941d77c7 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86_cpu_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:

 - Compute the purposeful misalignment of zen_untrain_ret automatically
   and assert __x86_return_thunk's alignment so that future changes to
   the symbol macros do not accidentally break them.

 - Remove CONFIG_X86_FEATURE_NAMES Kconfig option as its existence is
   pointless

* tag 'x86_cpu_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/retbleed: Add __x86_return_thunk alignment checks
  x86/cpu: Remove X86_FEATURE_NAMES
  x86/Kconfig: Make X86_FEATURE_NAMES non-configurable in prompt
parents 2c96136a f220125b
...@@ -294,7 +294,6 @@ config X86 ...@@ -294,7 +294,6 @@ config X86
select TRACE_IRQFLAGS_NMI_SUPPORT select TRACE_IRQFLAGS_NMI_SUPPORT
select USER_STACKTRACE_SUPPORT select USER_STACKTRACE_SUPPORT
select HAVE_ARCH_KCSAN if X86_64 select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS
select HAVE_ARCH_NODE_DEV_GROUP if X86_SGX select HAVE_ARCH_NODE_DEV_GROUP if X86_SGX
select FUNCTION_ALIGNMENT_16B if X86_64 || X86_ALIGNMENT_16 select FUNCTION_ALIGNMENT_16B if X86_64 || X86_ALIGNMENT_16
...@@ -444,17 +443,6 @@ config SMP ...@@ -444,17 +443,6 @@ config SMP
If you don't know what to do here, say N. If you don't know what to do here, say N.
config X86_FEATURE_NAMES
bool "Processor feature human-readable names" if EMBEDDED
default y
help
This option compiles in a table of x86 feature bits and corresponding
names. This is required to support /proc/cpuinfo and a few kernel
messages. You can disable this to save space, at the expense of
making those few kernel messages show numeric feature bits instead.
If in doubt, say Y.
config X86_X2APIC config X86_X2APIC
bool "Support x2apic" bool "Support x2apic"
depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST) depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
......
...@@ -389,7 +389,7 @@ config IA32_FEAT_CTL ...@@ -389,7 +389,7 @@ config IA32_FEAT_CTL
config X86_VMX_FEATURE_NAMES config X86_VMX_FEATURE_NAMES
def_bool y def_bool y
depends on IA32_FEAT_CTL && X86_FEATURE_NAMES depends on IA32_FEAT_CTL
menuconfig PROCESSOR_SELECT menuconfig PROCESSOR_SELECT
bool "Supported processor vendors" if EXPERT bool "Supported processor vendors" if EXPERT
......
...@@ -55,14 +55,12 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include \ ...@@ -55,14 +55,12 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include \
-include include/generated/autoconf.h \ -include include/generated/autoconf.h \
-D__EXPORTED_HEADERS__ -D__EXPORTED_HEADERS__
ifdef CONFIG_X86_FEATURE_NAMES
$(obj)/cpu.o: $(obj)/cpustr.h $(obj)/cpu.o: $(obj)/cpustr.h
quiet_cmd_cpustr = CPUSTR $@ quiet_cmd_cpustr = CPUSTR $@
cmd_cpustr = $(obj)/mkcpustr > $@ cmd_cpustr = $(obj)/mkcpustr > $@
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr) $(call if_changed,cpustr)
endif
targets += cpustr.h targets += cpustr.h
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
......
...@@ -14,9 +14,7 @@ ...@@ -14,9 +14,7 @@
*/ */
#include "boot.h" #include "boot.h"
#ifdef CONFIG_X86_FEATURE_NAMES
#include "cpustr.h" #include "cpustr.h"
#endif
static char *cpu_name(int level) static char *cpu_name(int level)
{ {
...@@ -35,7 +33,6 @@ static char *cpu_name(int level) ...@@ -35,7 +33,6 @@ static char *cpu_name(int level)
static void show_cap_strs(u32 *err_flags) static void show_cap_strs(u32 *err_flags)
{ {
int i, j; int i, j;
#ifdef CONFIG_X86_FEATURE_NAMES
const unsigned char *msg_strs = (const unsigned char *)x86_cap_strs; const unsigned char *msg_strs = (const unsigned char *)x86_cap_strs;
for (i = 0; i < NCAPINTS; i++) { for (i = 0; i < NCAPINTS; i++) {
u32 e = err_flags[i]; u32 e = err_flags[i];
...@@ -58,16 +55,6 @@ static void show_cap_strs(u32 *err_flags) ...@@ -58,16 +55,6 @@ static void show_cap_strs(u32 *err_flags)
e >>= 1; e >>= 1;
} }
} }
#else
for (i = 0; i < NCAPINTS; i++) {
u32 e = err_flags[i];
for (j = 0; j < 32; j++) {
if (e & 1)
printf("%d:%d ", i, j);
e >>= 1;
}
}
#endif
} }
int validate_cpu(void) int validate_cpu(void)
......
...@@ -38,15 +38,10 @@ enum cpuid_leafs ...@@ -38,15 +38,10 @@ enum cpuid_leafs
#define X86_CAP_FMT_NUM "%d:%d" #define X86_CAP_FMT_NUM "%d:%d"
#define x86_cap_flag_num(flag) ((flag) >> 5), ((flag) & 31) #define x86_cap_flag_num(flag) ((flag) >> 5), ((flag) & 31)
#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32]; extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s" #define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag] #define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT X86_CAP_FMT_NUM
#define x86_cap_flag x86_cap_flag_num
#endif
/* /*
* In order to save room, we index into this array by doing * In order to save room, we index into this array by doing
......
...@@ -27,7 +27,7 @@ obj-y += cpuid-deps.o ...@@ -27,7 +27,7 @@ obj-y += cpuid-deps.o
obj-y += umwait.o obj-y += umwait.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o obj-y += capflags.o powerflags.o
obj-$(CONFIG_IA32_FEAT_CTL) += feat_ctl.o obj-$(CONFIG_IA32_FEAT_CTL) += feat_ctl.o
ifdef CONFIG_CPU_SUP_INTEL ifdef CONFIG_CPU_SUP_INTEL
...@@ -54,7 +54,6 @@ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o ...@@ -54,7 +54,6 @@ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o
obj-$(CONFIG_ACRN_GUEST) += acrn.o obj-$(CONFIG_ACRN_GUEST) += acrn.o
ifdef CONFIG_X86_FEATURE_NAMES
quiet_cmd_mkcapflags = MKCAP $@ quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $@ $^ cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $@ $^
...@@ -63,5 +62,4 @@ vmxfeature = $(src)/../../include/asm/vmxfeatures.h ...@@ -63,5 +62,4 @@ vmxfeature = $(src)/../../include/asm/vmxfeatures.h
$(obj)/capflags.c: $(cpufeature) $(vmxfeature) $(src)/mkcapflags.sh FORCE $(obj)/capflags.c: $(cpufeature) $(vmxfeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags) $(call if_changed,mkcapflags)
endif
targets += capflags.c targets += capflags.c
...@@ -1489,12 +1489,10 @@ static void __init cpu_parse_early_param(void) ...@@ -1489,12 +1489,10 @@ static void __init cpu_parse_early_param(void)
if (!kstrtouint(opt, 10, &bit)) { if (!kstrtouint(opt, 10, &bit)) {
if (bit < NCAPINTS * 32) { if (bit < NCAPINTS * 32) {
#ifdef CONFIG_X86_FEATURE_NAMES
/* empty-string, i.e., ""-defined feature flags */ /* empty-string, i.e., ""-defined feature flags */
if (!x86_cap_flags[bit]) if (!x86_cap_flags[bit])
pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit)); pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
else else
#endif
pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
setup_clear_cpu_cap(bit); setup_clear_cpu_cap(bit);
...@@ -1507,7 +1505,6 @@ static void __init cpu_parse_early_param(void) ...@@ -1507,7 +1505,6 @@ static void __init cpu_parse_early_param(void)
continue; continue;
} }
#ifdef CONFIG_X86_FEATURE_NAMES
for (bit = 0; bit < 32 * NCAPINTS; bit++) { for (bit = 0; bit < 32 * NCAPINTS; bit++) {
if (!x86_cap_flag(bit)) if (!x86_cap_flag(bit))
continue; continue;
...@@ -1524,7 +1521,6 @@ static void __init cpu_parse_early_param(void) ...@@ -1524,7 +1521,6 @@ static void __init cpu_parse_early_param(void)
if (!found) if (!found)
pr_cont(" (unknown: %s)", opt); pr_cont(" (unknown: %s)", opt);
#endif
} }
pr_cont("\n"); pr_cont("\n");
......
...@@ -508,4 +508,8 @@ INIT_PER_CPU(irq_stack_backing_store); ...@@ -508,4 +508,8 @@ INIT_PER_CPU(irq_stack_backing_store);
"fixed_percpu_data is not at start of per-cpu area"); "fixed_percpu_data is not at start of per-cpu area");
#endif #endif
#ifdef CONFIG_RETHUNK
. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned");
#endif
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
...@@ -143,7 +143,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) ...@@ -143,7 +143,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
* from re-poisioning the BTB prediction. * from re-poisioning the BTB prediction.
*/ */
.align 64 .align 64
.skip 63, 0xcc .skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc
SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment