Commit 799571bf authored by Kan Liang, committed by Peter Zijlstra

perf/x86/intel/lbr: Add the function pointers for LBR save and restore

The MSRs of Architectural LBR are different from those of the previous model-specific
LBR. Perf has to implement different functions to save and restore them.

Introduce function pointers for LBR save and restore; perf initializes them
with the appropriate functions at boot time.

The generic optimizations, e.g. avoiding restoring the LBRs if no one else
touched them, still apply to Architectural LBR. The related code is not
moved into the model-specific functions.

The current model-specific LBR functions are set as the default; a minimal sketch of the resulting dispatch pattern follows the commit metadata below.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-5-git-send-email-kan.liang@linux.intel.com
parent c301b1d8
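
For illustration, here is a minimal, self-contained userspace sketch of the dispatch pattern this patch introduces. It is not the kernel implementation: struct pmu_ops, model_lbr_save/model_lbr_restore, generic_lbr_restore and the "untouched" flag are hypothetical stand-ins. The point it shows is that the generic wrapper keeps the "skip the restore if no one else touched the LBRs" optimization, while the raw MSR save/restore is reached only through per-model function pointers that boot code fills in.

#include <stdio.h>

/* Hypothetical stand-in for the task's saved-LBR bookkeeping. */
struct task_ctx {
	int lbr_callstack_users;
	int lbr_stack_state;		/* 0 == nothing saved, 1 == valid */
};

/* Hypothetical stand-in for struct x86_pmu: per-model LBR callbacks. */
struct pmu_ops {
	void (*lbr_save)(void *ctx);
	void (*lbr_restore)(void *ctx);
};

/* Model-specific MSR accessors; boot code picks the right pair. */
static void model_lbr_save(void *ctx)    { (void)ctx; puts("write LBR state to ctx"); }
static void model_lbr_restore(void *ctx) { (void)ctx; puts("load LBR state from ctx"); }

/* The model-specific functions are installed as the default. */
static struct pmu_ops x86_pmu = {
	.lbr_save	= model_lbr_save,
	.lbr_restore	= model_lbr_restore,
};

/*
 * Generic wrapper: the "nothing to do / nobody touched the LBRs"
 * checks stay here; only the MSR work goes through the pointer.
 */
static void generic_lbr_restore(struct task_ctx *tc, int untouched)
{
	if (tc->lbr_callstack_users == 0 || tc->lbr_stack_state == 0)
		return;				/* nothing was saved */

	if (untouched) {			/* no one else used the LBRs */
		tc->lbr_stack_state = 0;
		return;
	}

	x86_pmu.lbr_restore(tc);		/* model-specific part only */
	tc->lbr_stack_state = 0;
}

int main(void)
{
	struct task_ctx tc = { .lbr_callstack_users = 1, .lbr_stack_state = 1 };

	generic_lbr_restore(&tc, 0);		/* prints the restore message */
	return 0;
}

Supporting the new hardware then only means pointing the two callbacks at Architectural LBR implementations at boot; the wrapper and its optimization are shared, which is exactly what the diff below does with x86_pmu.lbr_save/x86_pmu.lbr_restore.
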
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3981,6 +3981,8 @@ static __initconst const struct x86_pmu core_pmu = {
 
 	.lbr_reset		= intel_pmu_lbr_reset_64,
 	.lbr_read		= intel_pmu_lbr_read_64,
+	.lbr_save		= intel_pmu_lbr_save,
+	.lbr_restore		= intel_pmu_lbr_restore,
 };
 
 static __initconst const struct x86_pmu intel_pmu = {
@@ -4029,6 +4031,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.lbr_reset		= intel_pmu_lbr_reset_64,
 	.lbr_read		= intel_pmu_lbr_read_64,
+	.lbr_save		= intel_pmu_lbr_save,
+	.lbr_restore		= intel_pmu_lbr_restore,
 };
 
 static __init void intel_clovertown_quirk(void)
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -323,31 +323,13 @@ static inline u64 rdlbr_to(unsigned int idx)
 	return val;
 }
 
-static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+void intel_pmu_lbr_restore(void *ctx)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct x86_perf_task_context *task_ctx = ctx;
 	int i;
 	unsigned lbr_idx, mask;
-	u64 tos;
-
-	if (task_ctx->lbr_callstack_users == 0 ||
-	    task_ctx->lbr_stack_state == LBR_NONE) {
-		intel_pmu_lbr_reset();
-		return;
-	}
-
-	tos = task_ctx->tos;
-	/*
-	 * Does not restore the LBR registers, if
-	 * - No one else touched them, and
-	 * - Did not enter C6
-	 */
-	if ((task_ctx == cpuc->last_task_ctx) &&
-	    (task_ctx->log_id == cpuc->last_log_id) &&
-	    rdlbr_from(tos)) {
-		task_ctx->lbr_stack_state = LBR_NONE;
-		return;
-	}
+	u64 tos = task_ctx->tos;
 
 	mask = x86_pmu.lbr_nr - 1;
 	for (i = 0; i < task_ctx->valid_lbrs; i++) {
@@ -368,24 +350,48 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}
 
 	wrmsrl(x86_pmu.lbr_tos, tos);
-	task_ctx->lbr_stack_state = LBR_NONE;
 
 	if (cpuc->lbr_select)
 		wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 }
 
-static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	unsigned lbr_idx, mask;
-	u64 tos, from;
-	int i;
+	u64 tos;
 
-	if (task_ctx->lbr_callstack_users == 0) {
+	if (task_ctx->lbr_callstack_users == 0 ||
+	    task_ctx->lbr_stack_state == LBR_NONE) {
+		intel_pmu_lbr_reset();
+		return;
+	}
+
+	tos = task_ctx->tos;
+	/*
+	 * Does not restore the LBR registers, if
+	 * - No one else touched them, and
+	 * - Did not enter C6
+	 */
+	if ((task_ctx == cpuc->last_task_ctx) &&
+	    (task_ctx->log_id == cpuc->last_log_id) &&
+	    rdlbr_from(tos)) {
 		task_ctx->lbr_stack_state = LBR_NONE;
 		return;
 	}
 
+	x86_pmu.lbr_restore(task_ctx);
+
+	task_ctx->lbr_stack_state = LBR_NONE;
+}
+
+void intel_pmu_lbr_save(void *ctx)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct x86_perf_task_context *task_ctx = ctx;
+	unsigned lbr_idx, mask;
+	u64 tos, from;
+	int i;
+
 	mask = x86_pmu.lbr_nr - 1;
 	tos = intel_pmu_lbr_tos();
 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
@@ -400,13 +406,26 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 	}
 	task_ctx->valid_lbrs = i;
 	task_ctx->tos = tos;
+
+	if (cpuc->lbr_select)
+		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
+}
+
+static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	if (task_ctx->lbr_callstack_users == 0) {
+		task_ctx->lbr_stack_state = LBR_NONE;
+		return;
+	}
+
+	x86_pmu.lbr_save(task_ctx);
+
 	task_ctx->lbr_stack_state = LBR_VALID;
 
 	cpuc->last_task_ctx = task_ctx;
 	cpuc->last_log_id = ++task_ctx->log_id;
-
-	if (cpuc->lbr_select)
-		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 }
 
 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -695,6 +695,8 @@ struct x86_pmu {
 
 	void		(*lbr_reset)(void);
 	void		(*lbr_read)(struct cpu_hw_events *cpuc);
+	void		(*lbr_save)(void *ctx);
+	void		(*lbr_restore)(void *ctx);
 
 	/*
 	 * Intel PT/LBR/BTS are exclusive
@@ -1090,6 +1092,10 @@ void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
 
 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
 
+void intel_pmu_lbr_save(void *ctx);
+
+void intel_pmu_lbr_restore(void *ctx);
+
 void intel_pmu_lbr_init_core(void);
 
 void intel_pmu_lbr_init_nhm(void);