Commit 43cf7631 authored by Yan, Zheng; committed by Ingo Molnar

perf/x86/intel: Introduce setup_pebs_sample_data()

Move code that sets up the PEBS sample data to a separate function.
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1430940834-8964-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 851559e3
...@@ -853,8 +853,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs) ...@@ -853,8 +853,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
return txn; return txn;
} }
static void __intel_pmu_pebs_event(struct perf_event *event, static void setup_pebs_sample_data(struct perf_event *event,
struct pt_regs *iregs, void *__pebs) struct pt_regs *iregs, void *__pebs,
struct perf_sample_data *data,
struct pt_regs *regs)
{ {
#define PERF_X86_EVENT_PEBS_HSW_PREC \ #define PERF_X86_EVENT_PEBS_HSW_PREC \
(PERF_X86_EVENT_PEBS_ST_HSW | \ (PERF_X86_EVENT_PEBS_ST_HSW | \
...@@ -866,30 +868,25 @@ static void __intel_pmu_pebs_event(struct perf_event *event, ...@@ -866,30 +868,25 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
*/ */
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_hsw *pebs = __pebs; struct pebs_record_hsw *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
u64 sample_type; u64 sample_type;
int fll, fst, dsrc; int fll, fst, dsrc;
int fl = event->hw.flags; int fl = event->hw.flags;
if (!intel_pmu_save_and_restart(event))
return;
sample_type = event->attr.sample_type; sample_type = event->attr.sample_type;
dsrc = sample_type & PERF_SAMPLE_DATA_SRC; dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
fll = fl & PERF_X86_EVENT_PEBS_LDLAT; fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
perf_sample_data_init(&data, 0, event->hw.last_period); perf_sample_data_init(data, 0, event->hw.last_period);
data.period = event->hw.last_period; data->period = event->hw.last_period;
/* /*
* Use latency for weight (only avail with PEBS-LL) * Use latency for weight (only avail with PEBS-LL)
*/ */
if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
data.weight = pebs->lat; data->weight = pebs->lat;
/* /*
* data.data_src encodes the data source * data.data_src encodes the data source
...@@ -902,7 +899,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, ...@@ -902,7 +899,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
val = precise_datala_hsw(event, pebs->dse); val = precise_datala_hsw(event, pebs->dse);
else if (fst) else if (fst)
val = precise_store_data(pebs->dse); val = precise_store_data(pebs->dse);
data.data_src.val = val; data->data_src.val = val;
} }
/* /*
...@@ -915,58 +912,70 @@ static void __intel_pmu_pebs_event(struct perf_event *event, ...@@ -915,58 +912,70 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
* A possible PERF_SAMPLE_REGS will have to transfer all regs. * A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/ */
regs = *iregs; *regs = *iregs;
regs.flags = pebs->flags; regs->flags = pebs->flags;
set_linear_ip(&regs, pebs->ip); set_linear_ip(regs, pebs->ip);
regs.bp = pebs->bp; regs->bp = pebs->bp;
regs.sp = pebs->sp; regs->sp = pebs->sp;
if (sample_type & PERF_SAMPLE_REGS_INTR) { if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs.ax = pebs->ax; regs->ax = pebs->ax;
regs.bx = pebs->bx; regs->bx = pebs->bx;
regs.cx = pebs->cx; regs->cx = pebs->cx;
regs.dx = pebs->dx; regs->dx = pebs->dx;
regs.si = pebs->si; regs->si = pebs->si;
regs.di = pebs->di; regs->di = pebs->di;
regs.bp = pebs->bp; regs->bp = pebs->bp;
regs.sp = pebs->sp; regs->sp = pebs->sp;
regs.flags = pebs->flags; regs->flags = pebs->flags;
#ifndef CONFIG_X86_32 #ifndef CONFIG_X86_32
regs.r8 = pebs->r8; regs->r8 = pebs->r8;
regs.r9 = pebs->r9; regs->r9 = pebs->r9;
regs.r10 = pebs->r10; regs->r10 = pebs->r10;
regs.r11 = pebs->r11; regs->r11 = pebs->r11;
regs.r12 = pebs->r12; regs->r12 = pebs->r12;
regs.r13 = pebs->r13; regs->r13 = pebs->r13;
regs.r14 = pebs->r14; regs->r14 = pebs->r14;
regs.r15 = pebs->r15; regs->r15 = pebs->r15;
#endif #endif
} }
if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
regs.ip = pebs->real_ip; regs->ip = pebs->real_ip;
regs.flags |= PERF_EFLAGS_EXACT; regs->flags |= PERF_EFLAGS_EXACT;
} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
regs.flags |= PERF_EFLAGS_EXACT; regs->flags |= PERF_EFLAGS_EXACT;
else else
regs.flags &= ~PERF_EFLAGS_EXACT; regs->flags &= ~PERF_EFLAGS_EXACT;
if ((sample_type & PERF_SAMPLE_ADDR) && if ((sample_type & PERF_SAMPLE_ADDR) &&
x86_pmu.intel_cap.pebs_format >= 1) x86_pmu.intel_cap.pebs_format >= 1)
data.addr = pebs->dla; data->addr = pebs->dla;
if (x86_pmu.intel_cap.pebs_format >= 2) { if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */ /* Only set the TSX weight when no memory weight. */
if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
data.weight = intel_hsw_weight(pebs); data->weight = intel_hsw_weight(pebs);
if (sample_type & PERF_SAMPLE_TRANSACTION) if (sample_type & PERF_SAMPLE_TRANSACTION)
data.txn = intel_hsw_transaction(pebs); data->txn = intel_hsw_transaction(pebs);
} }
if (has_branch_stack(event)) if (has_branch_stack(event))
data.br_stack = &cpuc->lbr_stack; data->br_stack = &cpuc->lbr_stack;
}
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
struct perf_sample_data data;
struct pt_regs regs;
if (!intel_pmu_save_and_restart(event))
return;
setup_pebs_sample_data(event, iregs, __pebs, &data, &regs);
if (perf_event_overflow(event, &data, &regs)) if (perf_event_overflow(event, &data, &regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment