Commit 6b188783 authored by Greg Kroah-Hartman

Revert "x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation"

This reverts commit 233b9d7d which is
commit 53c613fe upstream.

It's not ready for the stable trees as there are major slowdowns
involved with this patch.
Reported-by: Jiri Kosina <jkosina@suse.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 520d5649
...@@ -35,10 +35,12 @@ static void __init spectre_v2_select_mitigation(void); ...@@ -35,10 +35,12 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void); static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void); static void __init l1tf_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ /*
u64 x86_spec_ctrl_base; * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
* writes to SPEC_CTRL contain whatever reserved bits have been set.
*/
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/* /*
* The vendor and possibly platform specific bits which can be modified in * The vendor and possibly platform specific bits which can be modified in
...@@ -323,46 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) ...@@ -323,46 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd; return cmd;
} }
static bool stibp_needed(void)
{
if (spectre_v2_enabled == SPECTRE_V2_NONE)
return false;
if (!boot_cpu_has(X86_FEATURE_STIBP))
return false;
return true;
}
static void update_stibp_msr(void *info)
{
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
void arch_smt_update(void)
{
u64 mask;
if (!stibp_needed())
return;
mutex_lock(&spec_ctrl_mutex);
mask = x86_spec_ctrl_base;
if (cpu_smt_control == CPU_SMT_ENABLED)
mask |= SPEC_CTRL_STIBP;
else
mask &= ~SPEC_CTRL_STIBP;
if (mask != x86_spec_ctrl_base) {
pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
cpu_smt_control == CPU_SMT_ENABLED ?
"Enabling" : "Disabling");
x86_spec_ctrl_base = mask;
on_each_cpu(update_stibp_msr, NULL, 1);
}
mutex_unlock(&spec_ctrl_mutex);
}
static void __init spectre_v2_select_mitigation(void) static void __init spectre_v2_select_mitigation(void)
{ {
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
...@@ -462,9 +424,6 @@ static void __init spectre_v2_select_mitigation(void) ...@@ -462,9 +424,6 @@ static void __init spectre_v2_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n"); pr_info("Enabling Restricted Speculation for firmware calls\n");
} }
/* Enable STIBP if appropriate */
arch_smt_update();
} }
#undef pr_fmt #undef pr_fmt
...@@ -855,8 +814,6 @@ static ssize_t l1tf_show_state(char *buf) ...@@ -855,8 +814,6 @@ static ssize_t l1tf_show_state(char *buf)
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug) char *buf, unsigned int bug)
{ {
int ret;
if (!boot_cpu_has_bug(bug)) if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n"); return sprintf(buf, "Not affected\n");
...@@ -874,12 +831,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr ...@@ -874,12 +831,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return sprintf(buf, "Mitigation: __user pointer sanitization\n"); return sprintf(buf, "Mitigation: __user pointer sanitization\n");
case X86_BUG_SPECTRE_V2: case X86_BUG_SPECTRE_V2:
ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
(x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
spectre_v2_module_string()); spectre_v2_module_string());
return ret;
case X86_BUG_SPEC_STORE_BYPASS: case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
......
...@@ -2026,12 +2026,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu) ...@@ -2026,12 +2026,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE); kobject_uevent(&dev->kobj, KOBJ_ONLINE);
} }
/*
* Architectures that need SMT-specific errata handling during SMT hotplug
* should override this.
*/
void __weak arch_smt_update(void) { };
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{ {
int cpu, ret = 0; int cpu, ret = 0;
...@@ -2058,10 +2052,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) ...@@ -2058,10 +2052,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
*/ */
cpuhp_offline_cpu_device(cpu); cpuhp_offline_cpu_device(cpu);
} }
if (!ret) { if (!ret)
cpu_smt_control = ctrlval; cpu_smt_control = ctrlval;
arch_smt_update();
}
cpu_maps_update_done(); cpu_maps_update_done();
return ret; return ret;
} }
...@@ -2072,7 +2064,6 @@ static int cpuhp_smt_enable(void) ...@@ -2072,7 +2064,6 @@ static int cpuhp_smt_enable(void)
cpu_maps_update_begin(); cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED; cpu_smt_control = CPU_SMT_ENABLED;
arch_smt_update();
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */ /* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment