Commit 8ecc4979 authored by Dominik Brodowski, committed by Thomas Gleixner

x86/speculation: Simplify the CPU bug detection logic

Only CPUs which speculate can speculate. Therefore, it seems prudent
to test for cpu_no_speculation first and only then determine whether
a specific speculating CPU is susceptible to store bypass speculation.
This is underlined by the fact that all CPUs currently listed in
cpu_no_speculation were present in cpu_no_spec_store_bypass as well.
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@suse.de
Cc: konrad.wilk@oracle.com
Link: https://lkml.kernel.org/r/20180522090539.GA24668@light.dominikbrodowski.net
parent 0aa48468
@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
 	{}
 };
 
+/* Only list CPUs which speculate but are non susceptible to SSB */
 static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{ X86_VENDOR_CENTAUR,	5,					},
-	{ X86_VENDOR_INTEL,	5,					},
-	{ X86_VENDOR_NSC,	5,					},
 	{ X86_VENDOR_AMD,	0x12,					},
 	{ X86_VENDOR_AMD,	0x11,					},
 	{ X86_VENDOR_AMD,	0x10,					},
 	{ X86_VENDOR_AMD,	0xf,					},
-	{ X86_VENDOR_ANY,	4,					},
 	{}
 };
 
@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
+	if (x86_match_cpu(cpu_no_speculation))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	   !(ia32_cap & ARCH_CAP_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
-	if (x86_match_cpu(cpu_no_speculation))
-		return;
-
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-
 	if (x86_match_cpu(cpu_no_meltdown))
 		return;
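
For readability, here is cpu_set_bug_bits() as it reads once the hunks above are applied. This is a condensed reconstruction, not the verbatim file: the opening line of the SSB condition (the "if (!x86_match_cpu(cpu_no_spec_store_bypass) && ..." line) lies just outside the shown diff context and is completed here from the surrounding kernel source, and the Meltdown handling after the last context line is omitted.

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	/* CPUs which do not speculate at all: no Spectre, no SSB. */
	if (x86_match_cpu(cpu_no_speculation))
		return;

	/* Every remaining (speculating) CPU is treated as Spectre-affected. */
	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	/* SSB is forced unless the CPU is whitelisted or reports SSB_NO. */
	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	    !(ia32_cap & ARCH_CAP_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (x86_match_cpu(cpu_no_meltdown))
		return;

	/* ... Meltdown handling continues here (not part of the shown hunks). */
}

With cpu_no_speculation checked first, the SSB whitelist only needs to enumerate CPUs that do speculate but are not susceptible, which is what allows the family-4/5 and early Atom entries to be dropped from cpu_no_spec_store_bypass.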