Commit 10ed2b11 authored by Ingo Molnar

Merge branch 'x86/cpu' into perf/core, to pick up dependent commits

We are going to fix perf-events fallout of changes in tip:x86/cpu,
so merge in that branch first.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 690ca3a3 2eda374e
...@@ -696,78 +696,78 @@ static const struct cstate_model srf_cstates __initconst = { ...@@ -696,78 +696,78 @@ static const struct cstate_model srf_cstates __initconst = {
static const struct x86_cpu_id intel_cstates_match[] __initconst = { static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), X86_MATCH_VFM(INTEL_NEHALEM, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_cstates), X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhm_cstates), X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_cstates), X86_MATCH_VFM(INTEL_WESTMERE, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_cstates), X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhm_cstates), X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_cstates), X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snb_cstates), X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &snb_cstates), X86_MATCH_VFM(INTEL_IVYBRIDGE, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &snb_cstates), X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &snb_cstates), X86_MATCH_VFM(INTEL_HASWELL, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &snb_cstates), X86_MATCH_VFM(INTEL_HASWELL_X, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &snb_cstates), X86_MATCH_VFM(INTEL_HASWELL_G, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hswult_cstates), X86_MATCH_VFM(INTEL_HASWELL_L, &hswult_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &slm_cstates), X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &slm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &slm_cstates), X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &slm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &slm_cstates), X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &slm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &snb_cstates), X86_MATCH_VFM(INTEL_BROADWELL, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &snb_cstates), X86_MATCH_VFM(INTEL_BROADWELL_D, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &snb_cstates), X86_MATCH_VFM(INTEL_BROADWELL_G, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &snb_cstates), X86_MATCH_VFM(INTEL_BROADWELL_X, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &snb_cstates), X86_MATCH_VFM(INTEL_SKYLAKE_L, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &snb_cstates), X86_MATCH_VFM(INTEL_SKYLAKE, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &snb_cstates), X86_MATCH_VFM(INTEL_SKYLAKE_X, &snb_cstates),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &hswult_cstates), X86_MATCH_VFM(INTEL_KABYLAKE_L, &hswult_cstates),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &hswult_cstates), X86_MATCH_VFM(INTEL_KABYLAKE, &hswult_cstates),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &hswult_cstates), X86_MATCH_VFM(INTEL_COMETLAKE_L, &hswult_cstates),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &hswult_cstates), X86_MATCH_VFM(INTEL_COMETLAKE, &hswult_cstates),
X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnl_cstates), X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_cstates), X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_cstates), X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_TREMONT, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &srf_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &grr_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), X86_MATCH_VFM(INTEL_ICELAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates), X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates), X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates), X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &icx_cstates), X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &icx_cstates), X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), X86_MATCH_VFM(INTEL_TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates), X86_MATCH_VFM(INTEL_TIGERLAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates), X86_MATCH_VFM(INTEL_ROCKETLAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates), X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates), X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates), X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates), X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates), X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates), X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates), X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates),
{ }, { },
}; };
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h> #include <asm/perf_event.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -1457,7 +1458,7 @@ void __init intel_pmu_lbr_init_atom(void) ...@@ -1457,7 +1458,7 @@ void __init intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze * to have an operational LBR which can freeze
* on PMU interrupt * on PMU interrupt
*/ */
if (boot_cpu_data.x86_model == 28 if (boot_cpu_data.x86_vfm == INTEL_ATOM_BONNELL
&& boot_cpu_data.x86_stepping < 10) { && boot_cpu_data.x86_stepping < 10) {
pr_cont("LBR disabled due to erratum"); pr_cont("LBR disabled due to erratum");
return; return;
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include <asm/insn.h> #include <asm/insn.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/intel_pt.h> #include <asm/intel_pt.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include "../perf_event.h" #include "../perf_event.h"
#include "pt.h" #include "pt.h"
...@@ -211,11 +211,11 @@ static int __init pt_pmu_hw_init(void) ...@@ -211,11 +211,11 @@ static int __init pt_pmu_hw_init(void)
} }
/* model-specific quirks */ /* model-specific quirks */
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_BROADWELL: case INTEL_BROADWELL:
case INTEL_FAM6_BROADWELL_D: case INTEL_BROADWELL_D:
case INTEL_FAM6_BROADWELL_G: case INTEL_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X: case INTEL_BROADWELL_X:
/* not setting BRANCH_EN will #GP, erratum BDM106 */ /* not setting BRANCH_EN will #GP, erratum BDM106 */
pt_pmu.branch_en_always_on = true; pt_pmu.branch_en_always_on = true;
break; break;
......
...@@ -1829,56 +1829,56 @@ static const struct intel_uncore_init_fun generic_uncore_init __initconst = { ...@@ -1829,56 +1829,56 @@ static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
}; };
static const struct x86_cpu_id intel_uncore_match[] __initconst = { static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init), X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init), X86_MATCH_VFM(INTEL_NEHALEM, &nhm_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init), X86_MATCH_VFM(INTEL_WESTMERE, &nhm_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init), X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init), X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init), X86_MATCH_VFM(INTEL_IVYBRIDGE, &ivb_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init), X86_MATCH_VFM(INTEL_HASWELL, &hsw_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init), X86_MATCH_VFM(INTEL_HASWELL_L, &hsw_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init), X86_MATCH_VFM(INTEL_HASWELL_G, &hsw_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init), X86_MATCH_VFM(INTEL_BROADWELL, &bdw_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init), X86_MATCH_VFM(INTEL_BROADWELL_G, &bdw_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init), X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snbep_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init), X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhmex_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init), X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhmex_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init), X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ivbep_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init), X86_MATCH_VFM(INTEL_HASWELL_X, &hswep_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init), X86_MATCH_VFM(INTEL_BROADWELL_X, &bdx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init), X86_MATCH_VFM(INTEL_BROADWELL_D, &bdx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init), X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init), X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init), X86_MATCH_VFM(INTEL_SKYLAKE, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init), X86_MATCH_VFM(INTEL_SKYLAKE_L, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init), X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init), X86_MATCH_VFM(INTEL_KABYLAKE_L, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init), X86_MATCH_VFM(INTEL_KABYLAKE, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init), X86_MATCH_VFM(INTEL_COMETLAKE_L, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init), X86_MATCH_VFM(INTEL_COMETLAKE, &skl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init), X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init), X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init), X86_MATCH_VFM(INTEL_ICELAKE, &icl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init), X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init), X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init), X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init), X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init), X86_MATCH_VFM(INTEL_ROCKETLAKE, &rkl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init), X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init), X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init), X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init), X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init), X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &mtl_uncore_init), X86_MATCH_VFM(INTEL_METEORLAKE, &mtl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &snr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
{}, {},
}; };
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */ /* Nehalem-EX/Westmere-EX uncore support */
#include <asm/cpu_device_id.h>
#include "uncore.h" #include "uncore.h"
/* NHM-EX event control */ /* NHM-EX event control */
...@@ -1217,7 +1218,7 @@ static struct intel_uncore_type *nhmex_msr_uncores[] = { ...@@ -1217,7 +1218,7 @@ static struct intel_uncore_type *nhmex_msr_uncores[] = {
void nhmex_uncore_cpu_init(void) void nhmex_uncore_cpu_init(void)
{ {
if (boot_cpu_data.x86_model == 46) if (boot_cpu_data.x86_vfm == INTEL_NEHALEM_EX)
uncore_nhmex = true; uncore_nhmex = true;
else else
nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */ /* SandyBridge-EP/IvyTown uncore support */
#include <asm/cpu_device_id.h>
#include "uncore.h" #include "uncore.h"
#include "uncore_discovery.h" #include "uncore_discovery.h"
...@@ -3285,7 +3286,7 @@ void bdx_uncore_cpu_init(void) ...@@ -3285,7 +3286,7 @@ void bdx_uncore_cpu_init(void)
uncore_msr_uncores = bdx_msr_uncores; uncore_msr_uncores = bdx_msr_uncores;
/* Detect systems with no SBOXes */ /* Detect systems with no SBOXes */
if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
...@@ -5394,7 +5395,7 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) ...@@ -5394,7 +5395,7 @@ static int icx_iio_get_topology(struct intel_uncore_type *type)
static void icx_iio_set_mapping(struct intel_uncore_type *type) static void icx_iio_set_mapping(struct intel_uncore_type *type)
{ {
/* Detect ICX-D system. This case is not supported */ /* Detect ICX-D system. This case is not supported */
if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
return; return;
} }
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
#include <linux/nospec.h> #include <linux/nospec.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include "probe.h" #include "probe.h"
enum perf_msr_id { enum perf_msr_id {
...@@ -43,75 +43,75 @@ static bool test_intel(int idx, void *data) ...@@ -43,75 +43,75 @@ static bool test_intel(int idx, void *data)
boot_cpu_data.x86 != 6) boot_cpu_data.x86 != 6)
return false; return false;
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_NEHALEM: case INTEL_NEHALEM:
case INTEL_FAM6_NEHALEM_G: case INTEL_NEHALEM_G:
case INTEL_FAM6_NEHALEM_EP: case INTEL_NEHALEM_EP:
case INTEL_FAM6_NEHALEM_EX: case INTEL_NEHALEM_EX:
case INTEL_FAM6_WESTMERE: case INTEL_WESTMERE:
case INTEL_FAM6_WESTMERE_EP: case INTEL_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX: case INTEL_WESTMERE_EX:
case INTEL_FAM6_SANDYBRIDGE: case INTEL_SANDYBRIDGE:
case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_SANDYBRIDGE_X:
case INTEL_FAM6_IVYBRIDGE: case INTEL_IVYBRIDGE:
case INTEL_FAM6_IVYBRIDGE_X: case INTEL_IVYBRIDGE_X:
case INTEL_FAM6_HASWELL: case INTEL_HASWELL:
case INTEL_FAM6_HASWELL_X: case INTEL_HASWELL_X:
case INTEL_FAM6_HASWELL_L: case INTEL_HASWELL_L:
case INTEL_FAM6_HASWELL_G: case INTEL_HASWELL_G:
case INTEL_FAM6_BROADWELL: case INTEL_BROADWELL:
case INTEL_FAM6_BROADWELL_D: case INTEL_BROADWELL_D:
case INTEL_FAM6_BROADWELL_G: case INTEL_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X: case INTEL_BROADWELL_X:
case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_SAPPHIRERAPIDS_X:
case INTEL_FAM6_EMERALDRAPIDS_X: case INTEL_EMERALDRAPIDS_X:
case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_GRANITERAPIDS_X:
case INTEL_FAM6_GRANITERAPIDS_D: case INTEL_GRANITERAPIDS_D:
case INTEL_FAM6_ATOM_SILVERMONT: case INTEL_ATOM_SILVERMONT:
case INTEL_FAM6_ATOM_SILVERMONT_D: case INTEL_ATOM_SILVERMONT_D:
case INTEL_FAM6_ATOM_AIRMONT: case INTEL_ATOM_AIRMONT:
case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_D: case INTEL_ATOM_GOLDMONT_D:
case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_ATOM_TREMONT_D: case INTEL_ATOM_TREMONT_D:
case INTEL_FAM6_ATOM_TREMONT: case INTEL_ATOM_TREMONT:
case INTEL_FAM6_ATOM_TREMONT_L: case INTEL_ATOM_TREMONT_L:
case INTEL_FAM6_XEON_PHI_KNL: case INTEL_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM: case INTEL_XEON_PHI_KNM:
if (idx == PERF_MSR_SMI) if (idx == PERF_MSR_SMI)
return true; return true;
break; break;
case INTEL_FAM6_SKYLAKE_L: case INTEL_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE: case INTEL_SKYLAKE:
case INTEL_FAM6_SKYLAKE_X: case INTEL_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_L: case INTEL_KABYLAKE_L:
case INTEL_FAM6_KABYLAKE: case INTEL_KABYLAKE:
case INTEL_FAM6_COMETLAKE_L: case INTEL_COMETLAKE_L:
case INTEL_FAM6_COMETLAKE: case INTEL_COMETLAKE:
case INTEL_FAM6_ICELAKE_L: case INTEL_ICELAKE_L:
case INTEL_FAM6_ICELAKE: case INTEL_ICELAKE:
case INTEL_FAM6_ICELAKE_X: case INTEL_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D: case INTEL_ICELAKE_D:
case INTEL_FAM6_TIGERLAKE_L: case INTEL_TIGERLAKE_L:
case INTEL_FAM6_TIGERLAKE: case INTEL_TIGERLAKE:
case INTEL_FAM6_ROCKETLAKE: case INTEL_ROCKETLAKE:
case INTEL_FAM6_ALDERLAKE: case INTEL_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L: case INTEL_ALDERLAKE_L:
case INTEL_FAM6_ATOM_GRACEMONT: case INTEL_ATOM_GRACEMONT:
case INTEL_FAM6_RAPTORLAKE: case INTEL_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P: case INTEL_RAPTORLAKE_P:
case INTEL_FAM6_RAPTORLAKE_S: case INTEL_RAPTORLAKE_S:
case INTEL_FAM6_METEORLAKE: case INTEL_METEORLAKE:
case INTEL_FAM6_METEORLAKE_L: case INTEL_METEORLAKE_L:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true; return true;
break; break;
......
...@@ -2,6 +2,39 @@ ...@@ -2,6 +2,39 @@
#ifndef _ASM_X86_CPU_DEVICE_ID #ifndef _ASM_X86_CPU_DEVICE_ID
#define _ASM_X86_CPU_DEVICE_ID #define _ASM_X86_CPU_DEVICE_ID
/*
* Can't use <linux/bitfield.h> because it generates expressions that
* cannot be used in structure initializers. Bitfield construction
* here must match the union in struct cpuinfo_86:
* union {
* struct {
* __u8 x86_model;
* __u8 x86;
* __u8 x86_vendor;
* __u8 x86_reserved;
* };
* __u32 x86_vfm;
* };
*/
#define VFM_MODEL_BIT 0
#define VFM_FAMILY_BIT 8
#define VFM_VENDOR_BIT 16
#define VFM_RSVD_BIT 24
#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
#define VFM_MAKE(_vendor, _family, _model) ( \
((_model) << VFM_MODEL_BIT) | \
((_family) << VFM_FAMILY_BIT) | \
((_vendor) << VFM_VENDOR_BIT) \
)
/* /*
* Declare drivers belonging to specific x86 CPUs * Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions * Similar in spirit to pci_device_id and related PCI functions
...@@ -49,6 +82,16 @@ ...@@ -49,6 +82,16 @@
.driver_data = (unsigned long) _data \ .driver_data = (unsigned long) _data \
} }
#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
_steppings, _feature, _data) { \
.vendor = _vendor, \
.family = _family, \
.model = _model, \
.steppings = _steppings, \
.feature = _feature, \
.driver_data = (unsigned long) _data \
}
/** /**
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
...@@ -164,6 +207,56 @@ ...@@ -164,6 +207,56 @@
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
steppings, X86_FEATURE_ANY, data) steppings, X86_FEATURE_ANY, data)
/**
* X86_MATCH_VFM - Match encoded vendor/family/model
* @vfm: Encoded 8-bits each for vendor, family, model
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
*
* Stepping and feature are set to wildcards
*/
#define X86_MATCH_VFM(vfm, data) \
X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
VFM_VENDOR(vfm), \
VFM_FAMILY(vfm), \
VFM_MODEL(vfm), \
X86_STEPPING_ANY, X86_FEATURE_ANY, data)
/**
* X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
* @vfm: Encoded 8-bits each for vendor, family, model
* @steppings: Bitmask of steppings to match
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
*
* feature is set to wildcard
*/
#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
VFM_VENDOR(vfm), \
VFM_FAMILY(vfm), \
VFM_MODEL(vfm), \
steppings, X86_FEATURE_ANY, data)
/**
* X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
* @vfm: Encoded 8-bits each for vendor, family, model
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
*
* Steppings is set to wildcard
*/
#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
VFM_VENDOR(vfm), \
VFM_FAMILY(vfm), \
VFM_MODEL(vfm), \
X86_STEPPING_ANY, feature, data)
/* /*
* Match specific microcode revisions. * Match specific microcode revisions.
* *
...@@ -190,6 +283,14 @@ struct x86_cpu_desc { ...@@ -190,6 +283,14 @@ struct x86_cpu_desc {
.x86_microcode_rev = (revision), \ .x86_microcode_rev = (revision), \
} }
#define AMD_CPU_DESC(fam, model, stepping, revision) { \
.x86_family = (fam), \
.x86_vendor = X86_VENDOR_AMD, \
.x86_model = (model), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
......
...@@ -40,137 +40,221 @@ ...@@ -40,137 +40,221 @@
* their own names :-( * their own names :-(
*/ */
#define IFM(_fam, _model) VFM_MAKE(X86_VENDOR_INTEL, _fam, _model)
/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */ /* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
#define INTEL_FAM6_ANY X86_MODEL_ANY #define INTEL_FAM6_ANY X86_MODEL_ANY
/* Wildcard match for FAM6 so X86_MATCH_VFM(ANY) works */
#define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
#define INTEL_FAM6_CORE_YONAH 0x0E #define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_CORE_YONAH IFM(6, 0x0E)
#define INTEL_FAM6_CORE2_MEROM 0x0F #define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_CORE2_MEROM IFM(6, 0x0F)
#define INTEL_FAM6_CORE2_MEROM_L 0x16 #define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_CORE2_MEROM_L IFM(6, 0x16)
#define INTEL_FAM6_CORE2_PENRYN 0x17 #define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_CORE2_PENRYN IFM(6, 0x17)
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D #define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
#define INTEL_CORE2_DUNNINGTON IFM(6, 0x1D)
#define INTEL_FAM6_NEHALEM 0x1E #define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_NEHALEM IFM(6, 0x1E)
#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ #define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
#define INTEL_NEHALEM_G IFM(6, 0x1F) /* Auburndale / Havendale */
#define INTEL_FAM6_NEHALEM_EP 0x1A #define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_NEHALEM_EP IFM(6, 0x1A)
#define INTEL_FAM6_NEHALEM_EX 0x2E #define INTEL_FAM6_NEHALEM_EX 0x2E
#define INTEL_NEHALEM_EX IFM(6, 0x2E)
#define INTEL_FAM6_WESTMERE 0x25 #define INTEL_FAM6_WESTMERE 0x25
#define INTEL_WESTMERE IFM(6, 0x25)
#define INTEL_FAM6_WESTMERE_EP 0x2C #define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_WESTMERE_EP IFM(6, 0x2C)
#define INTEL_FAM6_WESTMERE_EX 0x2F #define INTEL_FAM6_WESTMERE_EX 0x2F
#define INTEL_WESTMERE_EX IFM(6, 0x2F)
#define INTEL_FAM6_SANDYBRIDGE 0x2A #define INTEL_FAM6_SANDYBRIDGE 0x2A
#define INTEL_SANDYBRIDGE IFM(6, 0x2A)
#define INTEL_FAM6_SANDYBRIDGE_X 0x2D #define INTEL_FAM6_SANDYBRIDGE_X 0x2D
#define INTEL_SANDYBRIDGE_X IFM(6, 0x2D)
#define INTEL_FAM6_IVYBRIDGE 0x3A #define INTEL_FAM6_IVYBRIDGE 0x3A
#define INTEL_IVYBRIDGE IFM(6, 0x3A)
#define INTEL_FAM6_IVYBRIDGE_X 0x3E #define INTEL_FAM6_IVYBRIDGE_X 0x3E
#define INTEL_IVYBRIDGE_X IFM(6, 0x3E)
#define INTEL_FAM6_HASWELL 0x3C #define INTEL_FAM6_HASWELL 0x3C
#define INTEL_HASWELL IFM(6, 0x3C)
#define INTEL_FAM6_HASWELL_X 0x3F #define INTEL_FAM6_HASWELL_X 0x3F
#define INTEL_HASWELL_X IFM(6, 0x3F)
#define INTEL_FAM6_HASWELL_L 0x45 #define INTEL_FAM6_HASWELL_L 0x45
#define INTEL_HASWELL_L IFM(6, 0x45)
#define INTEL_FAM6_HASWELL_G 0x46 #define INTEL_FAM6_HASWELL_G 0x46
#define INTEL_HASWELL_G IFM(6, 0x46)
#define INTEL_FAM6_BROADWELL 0x3D #define INTEL_FAM6_BROADWELL 0x3D
#define INTEL_BROADWELL IFM(6, 0x3D)
#define INTEL_FAM6_BROADWELL_G 0x47 #define INTEL_FAM6_BROADWELL_G 0x47
#define INTEL_BROADWELL_G IFM(6, 0x47)
#define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_FAM6_BROADWELL_X 0x4F
#define INTEL_BROADWELL_X IFM(6, 0x4F)
#define INTEL_FAM6_BROADWELL_D 0x56 #define INTEL_FAM6_BROADWELL_D 0x56
#define INTEL_BROADWELL_D IFM(6, 0x56)
#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */ #define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */
#define INTEL_SKYLAKE_L IFM(6, 0x4E) /* Sky Lake */
#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */ #define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */
#define INTEL_SKYLAKE IFM(6, 0x5E) /* Sky Lake */
#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */ #define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */
#define INTEL_SKYLAKE_X IFM(6, 0x55) /* Sky Lake */
/* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */ /* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */
/* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */ /* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */
#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */ #define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */
#define INTEL_KABYLAKE_L IFM(6, 0x8E) /* Sky Lake */
/* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */ /* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */
/* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */ /* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */
/* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */ /* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */
#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */ #define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */
#define INTEL_KABYLAKE IFM(6, 0x9E) /* Sky Lake */
/* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */ /* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */
#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */ #define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */
#define INTEL_COMETLAKE IFM(6, 0xA5) /* Sky Lake */
#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */ #define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */
#define INTEL_COMETLAKE_L IFM(6, 0xA6) /* Sky Lake */
#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */ #define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */
#define INTEL_CANNONLAKE_L IFM(6, 0x66) /* Palm Cove */
#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */ #define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */
#define INTEL_ICELAKE_X IFM(6, 0x6A) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */ #define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */
#define INTEL_ICELAKE_D IFM(6, 0x6C) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */ #define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */
#define INTEL_ICELAKE IFM(6, 0x7D) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */ #define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */
#define INTEL_ICELAKE_L IFM(6, 0x7E) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */ #define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */
#define INTEL_ICELAKE_NNPI IFM(6, 0x9D) /* Sunny Cove */
#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */ #define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */
#define INTEL_ROCKETLAKE IFM(6, 0xA7) /* Cypress Cove */
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */ #define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
#define INTEL_TIGERLAKE_L IFM(6, 0x8C) /* Willow Cove */
#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */ #define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
#define INTEL_TIGERLAKE IFM(6, 0x8D) /* Willow Cove */
#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */ #define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
#define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */
#define INTEL_FAM6_EMERALDRAPIDS_X 0xCF #define INTEL_FAM6_EMERALDRAPIDS_X 0xCF
#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF)
#define INTEL_FAM6_GRANITERAPIDS_X 0xAD #define INTEL_FAM6_GRANITERAPIDS_X 0xAD
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
#define INTEL_FAM6_GRANITERAPIDS_D 0xAE #define INTEL_FAM6_GRANITERAPIDS_D 0xAE
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
/* "Hybrid" Processors (P-Core/E-Core) */ /* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */ #define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_ALDERLAKE IFM(6, 0x97) /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
#define INTEL_ALDERLAKE_L IFM(6, 0x9A) /* Golden Cove / Gracemont */
#define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */ #define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */
#define INTEL_RAPTORLAKE IFM(6, 0xB7) /* Raptor Cove / Enhanced Gracemont */
#define INTEL_FAM6_RAPTORLAKE_P 0xBA #define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_RAPTORLAKE_P IFM(6, 0xBA)
#define INTEL_FAM6_RAPTORLAKE_S 0xBF #define INTEL_FAM6_RAPTORLAKE_S 0xBF
#define INTEL_RAPTORLAKE_S IFM(6, 0xBF)
#define INTEL_FAM6_METEORLAKE 0xAC #define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_METEORLAKE IFM(6, 0xAC)
#define INTEL_FAM6_METEORLAKE_L 0xAA #define INTEL_FAM6_METEORLAKE_L 0xAA
#define INTEL_METEORLAKE_L IFM(6, 0xAA)
#define INTEL_FAM6_ARROWLAKE_H 0xC5 #define INTEL_FAM6_ARROWLAKE_H 0xC5
#define INTEL_ARROWLAKE_H IFM(6, 0xC5)
#define INTEL_FAM6_ARROWLAKE 0xC6 #define INTEL_FAM6_ARROWLAKE 0xC6
#define INTEL_ARROWLAKE IFM(6, 0xC6)
#define INTEL_FAM6_ARROWLAKE_U 0xB5 #define INTEL_FAM6_ARROWLAKE_U 0xB5
#define INTEL_ARROWLAKE_U IFM(6, 0xB5)
#define INTEL_FAM6_LUNARLAKE_M 0xBD #define INTEL_FAM6_LUNARLAKE_M 0xBD
#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
/* "Small Core" Processors (Atom/E-Core) */ /* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ #define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
#define INTEL_ATOM_BONNELL_MID IFM(6, 0x26) /* Silverthorne, Lincroft */
#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */ #define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
#define INTEL_ATOM_SALTWELL IFM(6, 0x36) /* Cedarview */
#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */ #define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
#define INTEL_ATOM_SALTWELL_MID IFM(6, 0x27) /* Penwell */
#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ #define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
#define INTEL_ATOM_SALTWELL_TABLET IFM(6, 0x35) /* Cloverview */
#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ #define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
#define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */ #define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */
#define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */
#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ #define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */
#define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
#define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */
#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ #define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
#define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */
#define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */ #define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */
#define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ #define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */
#define INTEL_ATOM_GOLDMONT_D IFM(6, 0x5F) /* Denverton */
/* Note: the micro-architecture is "Goldmont Plus" */ /* Note: the micro-architecture is "Goldmont Plus" */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_ATOM_GOLDMONT_PLUS IFM(6, 0x7A) /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ #define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */
#define INTEL_ATOM_TREMONT_D IFM(6, 0x86) /* Jacobsville */
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ #define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
#define INTEL_ATOM_TREMONT IFM(6, 0x96) /* Elkhart Lake */
#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ #define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
#define INTEL_ATOM_TREMONT_L IFM(6, 0x9C) /* Jasper Lake */
#define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */ #define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */
#define INTEL_ATOM_GRACEMONT IFM(6, 0xBE) /* Alderlake N */
#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */ #define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */
#define INTEL_ATOM_CRESTMONT_X IFM(6, 0xAF) /* Sierra Forest */
#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */ #define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */
#define INTEL_ATOM_CRESTMONT IFM(6, 0xB6) /* Grand Ridge */
#define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */ #define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */
#define INTEL_ATOM_DARKMONT_X IFM(6, 0xDD) /* Clearwater Forest */
/* Xeon Phi */ /* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ #define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
#define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */
/* Family 5 */ /* Family 5 */
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */ #define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
#endif /* _ASM_X86_INTEL_FAMILY_H */ #endif /* _ASM_X86_INTEL_FAMILY_H */
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */ #define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */
#define MCG_EXT_P BIT_ULL(9) /* Extended registers available */ #define MCG_EXT_P BIT_ULL(9) /* Extended registers available */
#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */ #define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */
#define MCG_SEAM_NR BIT_ULL(12) /* MCG_STATUS_SEAM_NR supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16 #define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
...@@ -25,6 +26,7 @@ ...@@ -25,6 +26,7 @@
#define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */ #define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */ #define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */
#define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */ #define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */
#define MCG_STATUS_SEAM_NR BIT_ULL(12) /* Machine check inside SEAM non-root mode */
/* MCG_EXT_CTL register defines */ /* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */ #define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */
......
...@@ -108,9 +108,23 @@ struct cpuinfo_topology { ...@@ -108,9 +108,23 @@ struct cpuinfo_topology {
}; };
struct cpuinfo_x86 { struct cpuinfo_x86 {
__u8 x86; /* CPU family */ union {
__u8 x86_vendor; /* CPU vendor */ /*
__u8 x86_model; * The particular ordering (low-to-high) of (vendor,
* family, model) is done in case range of models, like
* it is usually done on AMD, need to be compared.
*/
struct {
__u8 x86_model;
/* CPU family */
__u8 x86;
/* CPU vendor */
__u8 x86_vendor;
__u8 x86_reserved;
};
/* combined vendor, family, model */
__u32 x86_vfm;
};
__u8 x86_stepping; __u8 x86_stepping;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
/* Number of 4K pages in DTLB/ITLB combined(in pages): */ /* Number of 4K pages in DTLB/ITLB combined(in pages): */
......
...@@ -62,7 +62,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o ...@@ -62,7 +62,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o obj-y += pci-dma.o quirks.o kdebugfs.o
obj-y += alternative.o i8253.o hw_breakpoint.o obj-y += alternative.o i8253.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += resource.o obj-y += resource.o
......
...@@ -497,32 +497,32 @@ static struct clock_event_device lapic_clockevent = { ...@@ -497,32 +497,32 @@ static struct clock_event_device lapic_clockevent = {
static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static const struct x86_cpu_id deadline_match[] __initconst = { static const struct x86_cpu_id deadline_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */ X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */ X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020), X86_MATCH_VFM(INTEL_BROADWELL_X, 0x0b000020),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011), X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e), X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c), X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003), X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136), X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014), X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0), X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22), X86_MATCH_VFM(INTEL_HASWELL, 0x22),
X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20), X86_MATCH_VFM(INTEL_HASWELL_L, 0x20),
X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17), X86_MATCH_VFM(INTEL_HASWELL_G, 0x17),
X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25), X86_MATCH_VFM(INTEL_BROADWELL, 0x25),
X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17), X86_MATCH_VFM(INTEL_BROADWELL_G, 0x17),
X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2), X86_MATCH_VFM(INTEL_SKYLAKE_L, 0xb2),
X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2), X86_MATCH_VFM(INTEL_SKYLAKE, 0xb2),
X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52), X86_MATCH_VFM(INTEL_KABYLAKE_L, 0x52),
X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52), X86_MATCH_VFM(INTEL_KABYLAKE, 0x52),
{}, {},
}; };
......
...@@ -178,13 +178,16 @@ static int x2apic_prepare_cpu(unsigned int cpu) ...@@ -178,13 +178,16 @@ static int x2apic_prepare_cpu(unsigned int cpu)
u32 phys_apicid = apic->cpu_present_to_apicid(cpu); u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
u32 cluster = apic_cluster(phys_apicid); u32 cluster = apic_cluster(phys_apicid);
u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf)); u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
int node = cpu_to_node(cpu);
x86_cpu_to_logical_apicid[cpu] = logical_apicid; x86_cpu_to_logical_apicid[cpu] = logical_apicid;
if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0) if (alloc_clustermask(cpu, cluster, node) < 0)
return -ENOMEM; return -ENOMEM;
if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
return -ENOMEM; return -ENOMEM;
return 0; return 0;
} }
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/cacheinfo.h> #include <asm/cacheinfo.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h> #include <asm/spec-ctrl.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/numa.h> #include <asm/numa.h>
...@@ -794,6 +795,11 @@ static void init_amd_bd(struct cpuinfo_x86 *c) ...@@ -794,6 +795,11 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
clear_rdrand_cpuid_bit(c); clear_rdrand_cpuid_bit(c);
} }
static const struct x86_cpu_desc erratum_1386_microcode[] = {
AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
};
static void fix_erratum_1386(struct cpuinfo_x86 *c) static void fix_erratum_1386(struct cpuinfo_x86 *c)
{ {
/* /*
...@@ -803,7 +809,13 @@ static void fix_erratum_1386(struct cpuinfo_x86 *c) ...@@ -803,7 +809,13 @@ static void fix_erratum_1386(struct cpuinfo_x86 *c)
* *
* Affected parts all have no supervisor XSAVE states, meaning that * Affected parts all have no supervisor XSAVE states, meaning that
* the XSAVEC instruction (which works fine) is equivalent. * the XSAVEC instruction (which works fine) is equivalent.
*
* Clear the feature flag only on microcode revisions which
* don't have the fix.
*/ */
if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode))
return;
clear_cpu_cap(c, X86_FEATURE_XSAVES); clear_cpu_cap(c, X86_FEATURE_XSAVES);
} }
......
...@@ -124,25 +124,24 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) ...@@ -124,25 +124,24 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
return true; return true;
} }
#define X86_MATCH(model) \ #define X86_MATCH(vfm) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, NULL)
INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = { static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = {
X86_MATCH(XEON_PHI_KNL), X86_MATCH(INTEL_XEON_PHI_KNL),
X86_MATCH(XEON_PHI_KNM), X86_MATCH(INTEL_XEON_PHI_KNM),
{} {}
}; };
static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = { static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = {
X86_MATCH(SKYLAKE_X), X86_MATCH(INTEL_SKYLAKE_X),
{} {}
}; };
static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = { static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = {
X86_MATCH(ATOM_GOLDMONT), X86_MATCH(INTEL_ATOM_GOLDMONT),
X86_MATCH(ATOM_GOLDMONT_D), X86_MATCH(INTEL_ATOM_GOLDMONT_D),
X86_MATCH(ATOM_GOLDMONT_PLUS), X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS),
{} {}
}; };
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/vmx.h> #include <asm/vmx.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/e820/api.h> #include <asm/e820/api.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
...@@ -2391,20 +2391,20 @@ static void override_cache_bits(struct cpuinfo_x86 *c) ...@@ -2391,20 +2391,20 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
if (c->x86 != 6) if (c->x86 != 6)
return; return;
switch (c->x86_model) { switch (c->x86_vfm) {
case INTEL_FAM6_NEHALEM: case INTEL_NEHALEM:
case INTEL_FAM6_WESTMERE: case INTEL_WESTMERE:
case INTEL_FAM6_SANDYBRIDGE: case INTEL_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE: case INTEL_IVYBRIDGE:
case INTEL_FAM6_HASWELL: case INTEL_HASWELL:
case INTEL_FAM6_HASWELL_L: case INTEL_HASWELL_L:
case INTEL_FAM6_HASWELL_G: case INTEL_HASWELL_G:
case INTEL_FAM6_BROADWELL: case INTEL_BROADWELL:
case INTEL_FAM6_BROADWELL_G: case INTEL_BROADWELL_G:
case INTEL_FAM6_SKYLAKE_L: case INTEL_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE: case INTEL_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L: case INTEL_KABYLAKE_L:
case INTEL_FAM6_KABYLAKE: case INTEL_KABYLAKE:
if (c->x86_cache_bits < 44) if (c->x86_cache_bits < 44)
c->x86_cache_bits = 44; c->x86_cache_bits = 44;
break; break;
......
This diff is collapsed.
...@@ -228,6 +228,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c) ...@@ -228,6 +228,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c)
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n"); pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED; mktme_status = MKTME_DISABLED;
clear_cpu_cap(c, X86_FEATURE_TME);
return; return;
} }
......
...@@ -204,12 +204,12 @@ static int intel_epb_offline(unsigned int cpu) ...@@ -204,12 +204,12 @@ static int intel_epb_offline(unsigned int cpu)
} }
static const struct x86_cpu_id intel_epb_normal[] = { static const struct x86_cpu_id intel_epb_normal[] = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, X86_MATCH_VFM(INTEL_ALDERLAKE_L,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE), ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE), ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, X86_MATCH_VFM(INTEL_RAPTORLAKE_P,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE), ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
{} {}
}; };
......
...@@ -17,8 +17,7 @@ ...@@ -17,8 +17,7 @@
* *
* A typical table entry would be to match a specific CPU * A typical table entry would be to match a specific CPU
* *
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL, * X86_MATCH_VFM_FEATURE(INTEL_BROADWELL, X86_FEATURE_ANY, NULL);
* X86_FEATURE_ANY, NULL);
* *
* Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY, * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
* %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor) * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
...@@ -26,7 +25,7 @@ ...@@ -26,7 +25,7 @@
* asm/cpu_device_id.h contains a set of useful macros which are shortcuts * asm/cpu_device_id.h contains a set of useful macros which are shortcuts
* for various common selections. The above can be shortened to: * for various common selections. The above can be shortened to:
* *
* X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL); * X86_MATCH_VFM(INTEL_BROADWELL, NULL);
* *
* Arrays used to match for this should also be declared using * Arrays used to match for this should also be declared using
* MODULE_DEVICE_TABLE(x86cpu, ...) * MODULE_DEVICE_TABLE(x86cpu, ...)
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
#include <linux/kexec.h> #include <linux/kexec.h>
#include <asm/fred.h> #include <asm/fred.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
...@@ -1593,6 +1593,24 @@ noinstr void do_machine_check(struct pt_regs *regs) ...@@ -1593,6 +1593,24 @@ noinstr void do_machine_check(struct pt_regs *regs)
else else
queue_task_work(&m, msg, kill_me_maybe); queue_task_work(&m, msg, kill_me_maybe);
} else if (m.mcgstatus & MCG_STATUS_SEAM_NR) {
/*
* Saved RIP on stack makes it look like the machine check
* was taken in the kernel on the instruction following
* the entry to SEAM mode. But MCG_STATUS_SEAM_NR indicates
* that the machine check was taken inside SEAM non-root
* mode. CPU core has already marked that guest as dead.
* It is OK for the kernel to resume execution at the
* apparent point of the machine check as the fault did
* not occur there. Mark the page as poisoned so it won't
* be added to free list when the guest is terminated.
*/
if (mce_usable_address(&m)) {
struct page *p = pfn_to_online_page(m.addr >> PAGE_SHIFT);
if (p)
SetPageHWPoison(p);
}
} else { } else {
/* /*
* Handle an MCE which has happened in kernel space but from * Handle an MCE which has happened in kernel space but from
...@@ -1930,14 +1948,14 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) ...@@ -1930,14 +1948,14 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
cfg->bootlog = 0; cfg->bootlog = 0;
if (c->x86 == 6 && c->x86_model == 45) if (c->x86_vfm == INTEL_SANDYBRIDGE_X)
mce_flags.snb_ifu_quirk = 1; mce_flags.snb_ifu_quirk = 1;
/* /*
* Skylake, Cascacde Lake and Cooper Lake require a quirk on * Skylake, Cascacde Lake and Cooper Lake require a quirk on
* rep movs. * rep movs.
*/ */
if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X) if (c->x86_vfm == INTEL_SKYLAKE_X)
mce_flags.skx_repmov_quirk = 1; mce_flags.skx_repmov_quirk = 1;
} }
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/mce.h> #include <asm/mce.h>
...@@ -455,10 +455,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c) ...@@ -455,10 +455,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
{ {
u64 error_control; u64 error_control;
switch (c->x86_model) { switch (c->x86_vfm) {
case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_SANDYBRIDGE_X:
case INTEL_FAM6_IVYBRIDGE_X: case INTEL_IVYBRIDGE_X:
case INTEL_FAM6_HASWELL_X: case INTEL_HASWELL_X:
if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control)) if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
return; return;
error_control |= 2; error_control |= 2;
...@@ -484,12 +484,11 @@ bool intel_filter_mce(struct mce *m) ...@@ -484,12 +484,11 @@ bool intel_filter_mce(struct mce *m)
struct cpuinfo_x86 *c = &boot_cpu_data; struct cpuinfo_x86 *c = &boot_cpu_data;
/* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */ /* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */
if ((c->x86 == 6) && if ((c->x86_vfm == INTEL_HASWELL ||
((c->x86_model == INTEL_FAM6_HASWELL) || c->x86_vfm == INTEL_HASWELL_L ||
(c->x86_model == INTEL_FAM6_HASWELL_L) || c->x86_vfm == INTEL_BROADWELL ||
(c->x86_model == INTEL_FAM6_BROADWELL) || c->x86_vfm == INTEL_HASWELL_G ||
(c->x86_model == INTEL_FAM6_HASWELL_G) || c->x86_vfm == INTEL_SKYLAKE_X) &&
(c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
(m->bank == 0) && (m->bank == 0) &&
((m->status & 0xa0000000ffffffff) == 0x80000000000f0005)) ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
return true; return true;
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/insn.h> #include <asm/insn.h>
#include <asm/insn-eval.h> #include <asm/insn-eval.h>
...@@ -39,20 +39,20 @@ static struct severity { ...@@ -39,20 +39,20 @@ static struct severity {
u64 mask; u64 mask;
u64 result; u64 result;
unsigned char sev; unsigned char sev;
unsigned char mcgmask; unsigned short mcgmask;
unsigned char mcgres; unsigned short mcgres;
unsigned char ser; unsigned char ser;
unsigned char context; unsigned char context;
unsigned char excp; unsigned char excp;
unsigned char covered; unsigned char covered;
unsigned char cpu_model; unsigned int cpu_vfm;
unsigned char cpu_minstepping; unsigned char cpu_minstepping;
unsigned char bank_lo, bank_hi; unsigned char bank_lo, bank_hi;
char *msg; char *msg;
} severities[] = { } severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h #define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s #define VFM_STEPPING(m, s) .cpu_vfm = m, .cpu_minstepping = s
#define KERNEL .context = IN_KERNEL #define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER #define USER .context = IN_USER
#define KERNEL_RECOV .context = IN_KERNEL_RECOV #define KERNEL_RECOV .context = IN_KERNEL_RECOV
...@@ -128,7 +128,7 @@ static struct severity { ...@@ -128,7 +128,7 @@ static struct severity {
MCESEV( MCESEV(
AO, "Uncorrected Patrol Scrub Error", AO, "Uncorrected Patrol Scrub Error",
SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) VFM_STEPPING(INTEL_SKYLAKE_X, 4), BANK_RANGE(13, 18)
), ),
/* ignore OVER for UCNA */ /* ignore OVER for UCNA */
...@@ -173,6 +173,18 @@ static struct severity { ...@@ -173,6 +173,18 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
USER USER
), ),
MCESEV(
AR, "Data load error in SEAM non-root mode",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
KERNEL
),
MCESEV(
AR, "Instruction fetch error in SEAM non-root mode",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
KERNEL
),
MCESEV( MCESEV(
PANIC, "Data load in unrecoverable area of kernel", PANIC, "Data load in unrecoverable area of kernel",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
...@@ -386,7 +398,7 @@ static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char ...@@ -386,7 +398,7 @@ static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char
continue; continue;
if (s->excp && excp != s->excp) if (s->excp && excp != s->excp)
continue; continue;
if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) if (s->cpu_vfm && boot_cpu_data.x86_vfm != s->cpu_vfm)
continue; continue;
if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
continue; continue;
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -577,8 +577,7 @@ static bool is_blacklisted(unsigned int cpu) ...@@ -577,8 +577,7 @@ static bool is_blacklisted(unsigned int cpu)
* This behavior is documented in item BDF90, #334165 (Intel Xeon * This behavior is documented in item BDF90, #334165 (Intel Xeon
* Processor E7-8800/4800 v4 Product Family). * Processor E7-8800/4800 v4 Product Family).
*/ */
if (c->x86 == 6 && if (c->x86_vfm == INTEL_BROADWELL_X &&
c->x86_model == INTEL_FAM6_BROADWELL_X &&
c->x86_stepping == 0x01 && c->x86_stepping == 0x01 &&
llc_size_per_core > 2621440 && llc_size_per_core > 2621440 &&
c->microcode < 0x0b000021) { c->microcode < 0x0b000021) {
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include <linux/cacheinfo.h> #include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h> #include <linux/cpuhotplug.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/resctrl.h> #include <asm/resctrl.h>
#include "internal.h" #include "internal.h"
...@@ -821,18 +821,18 @@ static __init bool get_rdt_mon_resources(void) ...@@ -821,18 +821,18 @@ static __init bool get_rdt_mon_resources(void)
static __init void __check_quirks_intel(void) static __init void __check_quirks_intel(void)
{ {
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_HASWELL_X: case INTEL_HASWELL_X:
if (!rdt_options[RDT_FLAG_L3_CAT].force_off) if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
cache_alloc_hsw_probe(); cache_alloc_hsw_probe();
break; break;
case INTEL_FAM6_SKYLAKE_X: case INTEL_SKYLAKE_X:
if (boot_cpu_data.x86_stepping <= 4) if (boot_cpu_data.x86_stepping <= 4)
set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
else else
set_rdt_options("!l3cat"); set_rdt_options("!l3cat");
fallthrough; fallthrough;
case INTEL_FAM6_BROADWELL_X: case INTEL_BROADWELL_X:
intel_rdt_mbm_apply_quirk(); intel_rdt_mbm_apply_quirk();
break; break;
} }
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/resctrl.h> #include <asm/resctrl.h>
#include <asm/perf_event.h> #include <asm/perf_event.h>
...@@ -88,8 +88,8 @@ static u64 get_prefetch_disable_bits(void) ...@@ -88,8 +88,8 @@ static u64 get_prefetch_disable_bits(void)
boot_cpu_data.x86 != 6) boot_cpu_data.x86 != 6)
return 0; return 0;
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_BROADWELL_X: case INTEL_BROADWELL_X:
/* /*
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* as: * as:
...@@ -100,8 +100,8 @@ static u64 get_prefetch_disable_bits(void) ...@@ -100,8 +100,8 @@ static u64 get_prefetch_disable_bits(void)
* 63:4 Reserved * 63:4 Reserved
*/ */
return 0xF; return 0xF;
case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_ATOM_GOLDMONT_PLUS:
/* /*
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* as: * as:
...@@ -1084,9 +1084,9 @@ static int measure_l2_residency(void *_plr) ...@@ -1084,9 +1084,9 @@ static int measure_l2_residency(void *_plr)
* L2_HIT 02H * L2_HIT 02H
* L2_MISS 10H * L2_MISS 10H
*/ */
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_ATOM_GOLDMONT_PLUS:
perf_miss_attr.config = X86_CONFIG(.event = 0xd1, perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
.umask = 0x10); .umask = 0x10);
perf_hit_attr.config = X86_CONFIG(.event = 0xd1, perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
...@@ -1123,8 +1123,8 @@ static int measure_l3_residency(void *_plr) ...@@ -1123,8 +1123,8 @@ static int measure_l3_residency(void *_plr)
* MISS 41H * MISS 41H
*/ */
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_vfm) {
case INTEL_FAM6_BROADWELL_X: case INTEL_BROADWELL_X:
/* On BDW the hit event counts references, not hits */ /* On BDW the hit event counts references, not hits */
perf_hit_attr.config = X86_CONFIG(.event = 0x2e, perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
.umask = 0x4f); .umask = 0x4f);
...@@ -1142,7 +1142,7 @@ static int measure_l3_residency(void *_plr) ...@@ -1142,7 +1142,7 @@ static int measure_l3_residency(void *_plr)
*/ */
counts.miss_after -= counts.miss_before; counts.miss_after -= counts.miss_before;
if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) { if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) {
/* /*
* On BDW references and misses are counted, need to adjust. * On BDW references and misses are counted, need to adjust.
* Sometimes the "hits" counter is a bit more than the * Sometimes the "hits" counter is a bit more than the
......
...@@ -58,7 +58,7 @@ static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id) ...@@ -58,7 +58,7 @@ static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id)
tscan->amd_node_id = node_id; tscan->amd_node_id = node_id;
} }
static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb) static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
{ {
struct { struct {
// eax // eax
...@@ -86,7 +86,7 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb) ...@@ -86,7 +86,7 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
* If leaf 0xb is available, then the domain shifts are set * If leaf 0xb is available, then the domain shifts are set
* already and nothing to do here. * already and nothing to do here.
*/ */
if (!has_0xb) { if (!has_topoext) {
/* /*
* Leaf 0x80000008 set the CORE domain shift already. * Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it. * Update the SMT domain, but do not propagate it.
...@@ -169,21 +169,24 @@ static void topoext_fixup(struct topo_scan *tscan) ...@@ -169,21 +169,24 @@ static void topoext_fixup(struct topo_scan *tscan)
static void parse_topology_amd(struct topo_scan *tscan) static void parse_topology_amd(struct topo_scan *tscan)
{ {
bool has_0xb = false; bool has_topoext = false;
/* /*
* If the extended topology leaf 0x8000_001e is available * If the extended topology leaf 0x8000_001e is available
* try to get SMT and CORE shift from leaf 0xb first, then * try to get SMT, CORE, TILE, and DIE shifts from extended
* try to get the CORE shift from leaf 0x8000_0008. * CPUID leaf 0x8000_0026 on supported processors first. If
* extended CPUID leaf 0x8000_0026 is not supported, try to
* get SMT and CORE shift from leaf 0xb first, then try to
* get the CORE shift from leaf 0x8000_0008.
*/ */
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT)) if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
has_0xb = cpu_parse_topology_ext(tscan); has_topoext = cpu_parse_topology_ext(tscan);
if (!has_0xb && !parse_8000_0008(tscan)) if (!has_topoext && !parse_8000_0008(tscan))
return; return;
/* Prefer leaf 0x8000001e if available */ /* Prefer leaf 0x8000001e if available */
if (parse_8000_001e(tscan, has_0xb)) if (parse_8000_001e(tscan, has_topoext))
return; return;
/* Try the NODEID MSR */ /* Try the NODEID MSR */
......
...@@ -13,7 +13,10 @@ enum topo_types { ...@@ -13,7 +13,10 @@ enum topo_types {
CORE_TYPE = 2, CORE_TYPE = 2,
MAX_TYPE_0B = 3, MAX_TYPE_0B = 3,
MODULE_TYPE = 3, MODULE_TYPE = 3,
AMD_CCD_TYPE = 3,
TILE_TYPE = 4, TILE_TYPE = 4,
AMD_SOCKET_TYPE = 4,
MAX_TYPE_80000026 = 5,
DIE_TYPE = 5, DIE_TYPE = 5,
DIEGRP_TYPE = 6, DIEGRP_TYPE = 6,
MAX_TYPE_1F = 7, MAX_TYPE_1F = 7,
...@@ -32,6 +35,13 @@ static const unsigned int topo_domain_map_0b_1f[MAX_TYPE_1F] = { ...@@ -32,6 +35,13 @@ static const unsigned int topo_domain_map_0b_1f[MAX_TYPE_1F] = {
[DIEGRP_TYPE] = TOPO_DIEGRP_DOMAIN, [DIEGRP_TYPE] = TOPO_DIEGRP_DOMAIN,
}; };
static const unsigned int topo_domain_map_80000026[MAX_TYPE_80000026] = {
[SMT_TYPE] = TOPO_SMT_DOMAIN,
[CORE_TYPE] = TOPO_CORE_DOMAIN,
[AMD_CCD_TYPE] = TOPO_TILE_DOMAIN,
[AMD_SOCKET_TYPE] = TOPO_DIE_DOMAIN,
};
static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf, static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,
unsigned int *last_dom) unsigned int *last_dom)
{ {
...@@ -56,6 +66,7 @@ static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf, ...@@ -56,6 +66,7 @@ static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,
switch (leaf) { switch (leaf) {
case 0x0b: maxtype = MAX_TYPE_0B; map = topo_domain_map_0b_1f; break; case 0x0b: maxtype = MAX_TYPE_0B; map = topo_domain_map_0b_1f; break;
case 0x1f: maxtype = MAX_TYPE_1F; map = topo_domain_map_0b_1f; break; case 0x1f: maxtype = MAX_TYPE_1F; map = topo_domain_map_0b_1f; break;
case 0x80000026: maxtype = MAX_TYPE_80000026; map = topo_domain_map_80000026; break;
default: return false; default: return false;
} }
...@@ -125,6 +136,10 @@ bool cpu_parse_topology_ext(struct topo_scan *tscan) ...@@ -125,6 +136,10 @@ bool cpu_parse_topology_ext(struct topo_scan *tscan)
if (tscan->c->cpuid_level >= 0x1f && parse_topology_leaf(tscan, 0x1f)) if (tscan->c->cpuid_level >= 0x1f && parse_topology_leaf(tscan, 0x1f))
return true; return true;
/* AMD: Try leaf 0x80000026 first. */
if (tscan->c->extended_cpuid_level >= 0x80000026 && parse_topology_leaf(tscan, 0x80000026))
return true;
/* Intel/AMD: Fall back to leaf 0xB if available */ /* Intel/AMD: Fall back to leaf 0xB if available */
return tscan->c->cpuid_level >= 0x0b && parse_topology_leaf(tscan, 0x0b); return tscan->c->cpuid_level >= 0x0b && parse_topology_leaf(tscan, 0x0b);
} }
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
*/ */
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
#include <linux/dma-map-ops.h> #include <linux/dma-map-ops.h>
#include <linux/efi.h> #include <linux/efi.h>
...@@ -1218,3 +1219,10 @@ static int __init register_kernel_offset_dumper(void) ...@@ -1218,3 +1219,10 @@ static int __init register_kernel_offset_dumper(void)
return 0; return 0;
} }
__initcall(register_kernel_offset_dumper); __initcall(register_kernel_offset_dumper);
#ifdef CONFIG_HOTPLUG_CPU
bool arch_cpu_is_hotpluggable(int cpu)
{
return cpu > 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
...@@ -938,7 +938,7 @@ static int snp_set_vmsa(void *va, bool vmsa) ...@@ -938,7 +938,7 @@ static int snp_set_vmsa(void *va, bool vmsa)
#define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2) #define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3) #define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3)
static void *snp_alloc_vmsa_page(void) static void *snp_alloc_vmsa_page(int cpu)
{ {
struct page *p; struct page *p;
...@@ -950,7 +950,7 @@ static void *snp_alloc_vmsa_page(void) ...@@ -950,7 +950,7 @@ static void *snp_alloc_vmsa_page(void)
* *
* Allocate an 8k page which is also 8k-aligned. * Allocate an 8k page which is also 8k-aligned.
*/ */
p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
if (!p) if (!p)
return NULL; return NULL;
...@@ -1019,7 +1019,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip) ...@@ -1019,7 +1019,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
* #VMEXIT of that vCPU would wipe out all of the settings being done * #VMEXIT of that vCPU would wipe out all of the settings being done
* here. * here.
*/ */
vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(); vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu);
if (!vmsa) if (!vmsa)
return -ENOMEM; return -ENOMEM;
...@@ -1341,7 +1341,7 @@ static void __init alloc_runtime_data(int cpu) ...@@ -1341,7 +1341,7 @@ static void __init alloc_runtime_data(int cpu)
{ {
struct sev_es_runtime_data *data; struct sev_es_runtime_data *data;
data = memblock_alloc(sizeof(*data), PAGE_SIZE); data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
if (!data) if (!data)
panic("Can't allocate SEV-ES runtime data"); panic("Can't allocate SEV-ES runtime data");
......
...@@ -438,9 +438,9 @@ static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) ...@@ -438,9 +438,9 @@ static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
*/ */
static const struct x86_cpu_id intel_cod_cpu[] = { static const struct x86_cpu_id intel_cod_cpu[] = {
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */ X86_MATCH_VFM(INTEL_HASWELL_X, 0), /* COD */
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */ X86_MATCH_VFM(INTEL_BROADWELL_X, 0), /* COD */
X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */ X86_MATCH_VFM(INTEL_ANY, 1), /* SNC */
{} {}
}; };
...@@ -1033,20 +1033,22 @@ static __init void disable_smp(void) ...@@ -1033,20 +1033,22 @@ static __init void disable_smp(void)
void __init smp_prepare_cpus_common(void) void __init smp_prepare_cpus_common(void)
{ {
unsigned int i; unsigned int cpu, node;
/* Mark all except the boot CPU as hotpluggable */ /* Mark all except the boot CPU as hotpluggable */
for_each_possible_cpu(i) { for_each_possible_cpu(cpu) {
if (i) if (cpu)
per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids; per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
} }
for_each_possible_cpu(i) { for_each_possible_cpu(cpu) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); node = cpu_to_node(cpu);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL); zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL); zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
} }
set_cpu_sibling_map(0); set_cpu_sibling_map(0);
......
/*
* Populate sysfs with topology information
*
* Written by: Matthew Dobson, IBM Corporation
* Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <colpatch@us.ibm.com>
*/
#include <linux/interrupt.h>
#include <linux/nodemask.h>
#include <linux/export.h>
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/cpu.h>
#ifdef CONFIG_HOTPLUG_CPU
bool arch_cpu_is_hotpluggable(int cpu)
{
return cpu > 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include <asm/x86_init.h> #include <asm/x86_init.h>
#include <asm/geode.h> #include <asm/geode.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/intel-family.h> #include <asm/cpu_device_id.h>
#include <asm/i8259.h> #include <asm/i8259.h>
#include <asm/uv/uv.h> #include <asm/uv/uv.h>
...@@ -682,7 +682,7 @@ unsigned long native_calibrate_tsc(void) ...@@ -682,7 +682,7 @@ unsigned long native_calibrate_tsc(void)
* clock. * clock.
*/ */
if (crystal_khz == 0 && if (crystal_khz == 0 &&
boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D) boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
crystal_khz = 25000; crystal_khz = 25000;
/* /*
...@@ -713,7 +713,7 @@ unsigned long native_calibrate_tsc(void) ...@@ -713,7 +713,7 @@ unsigned long native_calibrate_tsc(void)
* For Atom SoCs TSC is the only reliable clocksource. * For Atom SoCs TSC is the only reliable clocksource.
* Mark TSC reliable so no watchdog on it. * Mark TSC reliable so no watchdog on it.
*/ */
if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT) if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
......
...@@ -147,13 +147,13 @@ static const struct freq_desc freq_desc_lgm = { ...@@ -147,13 +147,13 @@ static const struct freq_desc freq_desc_lgm = {
}; };
static const struct x86_cpu_id tsc_msr_cpu_ids[] = { static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &freq_desc_pnw), X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &freq_desc_pnw),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_TABLET,&freq_desc_clv), X86_MATCH_VFM(INTEL_ATOM_SALTWELL_TABLET, &freq_desc_clv),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &freq_desc_byt), X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &freq_desc_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &freq_desc_tng), X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &freq_desc_tng),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &freq_desc_cht), X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &freq_desc_cht),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &freq_desc_ann), X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &freq_desc_ann),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_NP, &freq_desc_lgm), X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &freq_desc_lgm),
{} {}
}; };
......
...@@ -261,21 +261,17 @@ static void __init probe_page_size_mask(void) ...@@ -261,21 +261,17 @@ static void __init probe_page_size_mask(void)
} }
} }
#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
.family = 6, \
.model = _model, \
}
/* /*
* INVLPG may not properly flush Global entries * INVLPG may not properly flush Global entries
* on these CPUs when PCIDs are enabled. * on these CPUs when PCIDs are enabled.
*/ */
static const struct x86_cpu_id invlpg_miss_ids[] = { static const struct x86_cpu_id invlpg_miss_ids[] = {
INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), X86_MATCH_VFM(INTEL_ALDERLAKE, 0),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0),
INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ), X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), X86_MATCH_VFM(INTEL_RAPTORLAKE, 0),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0),
{} {}
}; };
......
...@@ -9,6 +9,14 @@ ...@@ -9,6 +9,14 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include <asm/mce.h> #include <asm/mce.h>
/*
* MCE Event Record.
*
* Only very relevant and transient information which cannot be
* gathered from a system by any other means or which can only be
* acquired arduously should be added to this record.
*/
TRACE_EVENT(mce_record, TRACE_EVENT(mce_record,
TP_PROTO(struct mce *m), TP_PROTO(struct mce *m),
...@@ -25,6 +33,7 @@ TRACE_EVENT(mce_record, ...@@ -25,6 +33,7 @@ TRACE_EVENT(mce_record,
__field( u64, ipid ) __field( u64, ipid )
__field( u64, ip ) __field( u64, ip )
__field( u64, tsc ) __field( u64, tsc )
__field( u64, ppin )
__field( u64, walltime ) __field( u64, walltime )
__field( u32, cpu ) __field( u32, cpu )
__field( u32, cpuid ) __field( u32, cpuid )
...@@ -33,6 +42,7 @@ TRACE_EVENT(mce_record, ...@@ -33,6 +42,7 @@ TRACE_EVENT(mce_record,
__field( u8, cs ) __field( u8, cs )
__field( u8, bank ) __field( u8, bank )
__field( u8, cpuvendor ) __field( u8, cpuvendor )
__field( u32, microcode )
), ),
TP_fast_assign( TP_fast_assign(
...@@ -45,6 +55,7 @@ TRACE_EVENT(mce_record, ...@@ -45,6 +55,7 @@ TRACE_EVENT(mce_record,
__entry->ipid = m->ipid; __entry->ipid = m->ipid;
__entry->ip = m->ip; __entry->ip = m->ip;
__entry->tsc = m->tsc; __entry->tsc = m->tsc;
__entry->ppin = m->ppin;
__entry->walltime = m->time; __entry->walltime = m->time;
__entry->cpu = m->extcpu; __entry->cpu = m->extcpu;
__entry->cpuid = m->cpuid; __entry->cpuid = m->cpuid;
...@@ -53,20 +64,26 @@ TRACE_EVENT(mce_record, ...@@ -53,20 +64,26 @@ TRACE_EVENT(mce_record,
__entry->cs = m->cs; __entry->cs = m->cs;
__entry->bank = m->bank; __entry->bank = m->bank;
__entry->cpuvendor = m->cpuvendor; __entry->cpuvendor = m->cpuvendor;
__entry->microcode = m->microcode;
), ),
TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x",
__entry->cpu, __entry->cpu,
__entry->mcgcap, __entry->mcgstatus, __entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status, __entry->bank, __entry->status,
__entry->ipid, __entry->ipid,
__entry->addr, __entry->misc, __entry->synd, __entry->addr,
__entry->misc,
__entry->synd,
__entry->cs, __entry->ip, __entry->cs, __entry->ip,
__entry->tsc, __entry->tsc,
__entry->cpuvendor, __entry->cpuid, __entry->ppin,
__entry->cpuvendor,
__entry->cpuid,
__entry->walltime, __entry->walltime,
__entry->socketid, __entry->socketid,
__entry->apicid) __entry->apicid,
__entry->microcode)
); );
#endif /* _TRACE_MCE_H */ #endif /* _TRACE_MCE_H */
......
...@@ -103,21 +103,6 @@ static void clearhandler(int sig) ...@@ -103,21 +103,6 @@ static void clearhandler(int sig)
#define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26) #define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26)
#define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27) #define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27)
static inline void check_cpuid_xsave(void)
{
uint32_t eax, ebx, ecx, edx;
/*
* CPUID.1:ECX.XSAVE[bit 26] enumerates general
* support for the XSAVE feature set, including
* XGETBV.
*/
__cpuid_count(1, 0, eax, ebx, ecx, edx);
if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK))
fatal_error("cpuid: no CPU xsave support");
if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK))
fatal_error("cpuid: no OS xsave support");
}
static uint32_t xbuf_size; static uint32_t xbuf_size;
...@@ -350,6 +335,7 @@ enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED }; ...@@ -350,6 +335,7 @@ enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };
/* arch_prctl() and sigaltstack() test */ /* arch_prctl() and sigaltstack() test */
#define ARCH_GET_XCOMP_SUPP 0x1021
#define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_GET_XCOMP_PERM 0x1022
#define ARCH_REQ_XCOMP_PERM 0x1023 #define ARCH_REQ_XCOMP_PERM 0x1023
...@@ -928,8 +914,15 @@ static void test_ptrace(void) ...@@ -928,8 +914,15 @@ static void test_ptrace(void)
int main(void) int main(void)
{ {
/* Check hardware availability at first */ unsigned long features;
check_cpuid_xsave(); long rc;
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features);
if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) {
ksft_print_msg("no AMX support\n");
return KSFT_SKIP;
}
check_cpuid_xtiledata(); check_cpuid_xtiledata();
init_stashed_xsave(); init_stashed_xsave();
......
...@@ -1183,7 +1183,7 @@ int main(int argc, char **argv) ...@@ -1183,7 +1183,7 @@ int main(int argc, char **argv)
if (!cpu_has_lam()) { if (!cpu_has_lam()) {
ksft_print_msg("Unsupported LAM feature!\n"); ksft_print_msg("Unsupported LAM feature!\n");
return -1; return KSFT_SKIP;
} }
while ((c = getopt(argc, argv, "ht:")) != -1) { while ((c = getopt(argc, argv, "ht:")) != -1) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment