Commit d7ec8d8c authored by Peter Zijlstra (Intel), committed by Greg Kroah-Hartman

perf/x86/intel: Implement support for TSX Force Abort

commit 400816f6 upstream

Skylake (and later) will receive a microcode update to address a TSX
erratum. This microcode will, on execution of a TSX instruction
(speculative or not), use (clobber) PMC3. This update will also provide
a new MSR to change this behaviour, along with a CPUID bit to enumerate
the presence of this new MSR.

When the MSR gets set, the microcode will no longer use PMC3 but will
Force Abort every TSX transaction (upon executing COMMIT).

When TSX Force Abort (TFA) is allowed (the default), the MSR gets set
when PMC3 gets scheduled and cleared when, after scheduling, PMC3 is
unused.

When TFA is not allowed, clear PMC3 from all constraints so that it
will not get used.
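
For illustration only (not part of the patch): the write-avoidance pattern at
the heart of intel_set_tfa() below, modelled as a stand-alone C program. The
bit position of MSR_TFA_RTM_FORCE_ABORT is assumed here, and wrmsr_stub()
merely stands in for the kernel's wrmsrl(MSR_TSX_FORCE_ABORT, val); the point
is that the shadow copy lets the hot scheduling path skip redundant MSR writes.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MSR_TFA_RTM_FORCE_ABORT (1ULL << 0) /* assumed bit layout */

static uint64_t tfa_shadow; /* last value written to the MSR */

static void wrmsr_stub(uint64_t val) /* stub for wrmsrl(MSR_TSX_FORCE_ABORT, val) */
{
	printf("wrmsrl(MSR_TSX_FORCE_ABORT, %#llx)\n", (unsigned long long)val);
}

static void set_tfa(bool on)
{
	uint64_t val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (tfa_shadow != val) { /* only hit the MSR when the value changes */
		tfa_shadow = val;
		wrmsr_stub(val);
	}
}

int main(void)
{
	set_tfa(true);  /* PMC3 scheduled: force-abort TSX, MSR written */
	set_tfa(true);  /* shadow matches: no MSR write */
	set_tfa(false); /* PMC3 freed after scheduling: TFA cleared */
	return 0;
}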
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0e6487a0
@@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int added)
 	intel_pmu_enable_all(added);
 }
 
+static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
+{
+	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
+
+	if (cpuc->tfa_shadow != val) {
+		cpuc->tfa_shadow = val;
+		wrmsrl(MSR_TSX_FORCE_ABORT, val);
+	}
+}
+
+static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+	/*
+	 * We're going to use PMC3, make sure TFA is set before we touch it.
+	 */
+	if (cntr == 3 && !cpuc->is_fake)
+		intel_set_tfa(cpuc, true);
+}
+
+static void intel_tfa_pmu_enable_all(int added)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * If we find PMC3 is no longer used when we enable the PMU, we can
+	 * clear TFA.
+	 */
+	if (!test_bit(3, cpuc->active_mask))
+		intel_set_tfa(cpuc, false);
+
+	intel_pmu_enable_all(added);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
 	u64 status;
@@ -3218,6 +3251,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	return c;
 }
 
+static bool allow_tsx_force_abort = true;
+
+static struct event_constraint *
+tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+	/*
+	 * Without TFA we must not use PMC3.
+	 */
+	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
+		c = dyn_constraint(cpuc, c, idx);
+		c->idxmsk64 &= ~(1ULL << 3);
+		c->weight--;
+	}
+
+	return c;
+}
+
 /*
  * Broadwell:
  *
@@ -3312,13 +3365,15 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 			goto err;
 	}
 
-	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 		if (!cpuc->constraint_list)
 			goto err_shared_regs;
+	}
 
+	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
 		if (!cpuc->excl_cntrs)
 			goto err_constraint_list;
@@ -3425,9 +3480,10 @@ static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 		if (c->core_id == -1 || --c->refcnt == 0)
 			kfree(c);
 		cpuc->excl_cntrs = NULL;
-		kfree(cpuc->constraint_list);
-		cpuc->constraint_list = NULL;
 	}
+
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
 }
 
 static void intel_pmu_cpu_dying(int cpu)
@@ -3912,8 +3968,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
 	NULL
 };
 
+DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+
 static struct attribute *intel_pmu_attrs[] = {
 	&dev_attr_freeze_on_smi.attr,
+	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
 	NULL,
 };
@@ -4369,6 +4428,15 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = get_hsw_events_attrs();
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+
+		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+			x86_pmu.flags |= PMU_FL_TFA;
+			x86_pmu.get_event_constraints = tfa_get_event_constraints;
+			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
+			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
+			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+		}
+
 		pr_cont("Skylake events, ");
 		name = "skylake";
 		break;
@@ -238,6 +238,11 @@ struct cpu_hw_events {
 	struct intel_excl_cntrs	*excl_cntrs;
 	int excl_thread_id; /* 0 or 1 */
 
+	/*
+	 * SKL TSX_FORCE_ABORT shadow
+	 */
+	u64				tfa_shadow;
+
 	/*
 	 * AMD specific bits
 	 */
@@ -672,6 +677,7 @@ do {							\
 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
+#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
 
 #define EVENT_VAR(_id)		event_attr_##_id
 #define EVENT_PTR(_id)		&event_attr_##_id.attr.attr
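
The knob this patch introduces surfaces as a writable sysfs attribute
(registered via DEVICE_BOOL_ATTR() above). A user-space sketch of flipping it;
the path is inferred from the attribute name and assumed here to be
/sys/devices/cpu/allow_tsx_force_abort, and the file exists only when the
X86_FEATURE_TSX_FORCE_ABORT CPUID bit is present:

#include <stdio.h>

int main(void)
{
	/* assumed path; requires root to write */
	FILE *f = fopen("/sys/devices/cpu/allow_tsx_force_abort", "w");

	if (!f) {
		perror("allow_tsx_force_abort");
		return 1;
	}
	/* 0: never force-abort TSX; PMC3 is withheld from perf instead.
	 * 1 (default): perf may use PMC3, at the cost of aborting TSX. */
	fputs("0\n", f);
	fclose(f);
	return 0;
}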