Commit 9dfa9a5c authored by Peter Zijlstra

perf/x86: Reduce stack usage for x86_pmu::drain_pebs()

intel_pmu_drain_pebs_*() is typically called from handle_pmi_common(),
both have an on-stack struct perf_sample_data, which is *big*. Rewire
things so that drain_pebs() can use the one handle_pmi_common() has.
Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201030151955.054099690@infradead.org
parent 267fb273
...@@ -2630,7 +2630,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) ...@@ -2630,7 +2630,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
u64 pebs_enabled = cpuc->pebs_enabled; u64 pebs_enabled = cpuc->pebs_enabled;
handled++; handled++;
x86_pmu.drain_pebs(regs); x86_pmu.drain_pebs(regs, &data);
status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/* /*
......
...@@ -670,7 +670,9 @@ int intel_pmu_drain_bts_buffer(void) ...@@ -670,7 +670,9 @@ int intel_pmu_drain_bts_buffer(void)
static inline void intel_pmu_drain_pebs_buffer(void) static inline void intel_pmu_drain_pebs_buffer(void)
{ {
x86_pmu.drain_pebs(NULL); struct perf_sample_data data;
x86_pmu.drain_pebs(NULL, &data);
} }
/* /*
...@@ -1719,19 +1721,20 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) ...@@ -1719,19 +1721,20 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
return 0; return 0;
} }
static void __intel_pmu_pebs_event(struct perf_event *event, static __always_inline void
struct pt_regs *iregs, __intel_pmu_pebs_event(struct perf_event *event,
void *base, void *top, struct pt_regs *iregs,
int bit, int count, struct perf_sample_data *data,
void (*setup_sample)(struct perf_event *, void *base, void *top,
struct pt_regs *, int bit, int count,
void *, void (*setup_sample)(struct perf_event *,
struct perf_sample_data *, struct pt_regs *,
struct pt_regs *)) void *,
struct perf_sample_data *,
struct pt_regs *))
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
struct perf_sample_data data;
struct x86_perf_regs perf_regs; struct x86_perf_regs perf_regs;
struct pt_regs *regs = &perf_regs.regs; struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit); void *at = get_next_pebs_record_by_bit(base, top, bit);
...@@ -1752,14 +1755,14 @@ static void __intel_pmu_pebs_event(struct perf_event *event, ...@@ -1752,14 +1755,14 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
iregs = &dummy_iregs; iregs = &dummy_iregs;
while (count > 1) { while (count > 1) {
setup_sample(event, iregs, at, &data, regs); setup_sample(event, iregs, at, data, regs);
perf_event_output(event, &data, regs); perf_event_output(event, data, regs);
at += cpuc->pebs_record_size; at += cpuc->pebs_record_size;
at = get_next_pebs_record_by_bit(at, top, bit); at = get_next_pebs_record_by_bit(at, top, bit);
count--; count--;
} }
setup_sample(event, iregs, at, &data, regs); setup_sample(event, iregs, at, data, regs);
if (iregs == &dummy_iregs) { if (iregs == &dummy_iregs) {
/* /*
* The PEBS records may be drained in the non-overflow context, * The PEBS records may be drained in the non-overflow context,
...@@ -1767,18 +1770,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event, ...@@ -1767,18 +1770,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* last record the same as other PEBS records, and doesn't * last record the same as other PEBS records, and doesn't
* invoke the generic overflow handler. * invoke the generic overflow handler.
*/ */
perf_event_output(event, &data, regs); perf_event_output(event, data, regs);
} else { } else {
/* /*
* All but the last records are processed. * All but the last records are processed.
* The last one is left to be able to call the overflow handler. * The last one is left to be able to call the overflow handler.
*/ */
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
} }
} }
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds; struct debug_store *ds = cpuc->ds;
...@@ -1812,7 +1815,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) ...@@ -1812,7 +1815,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
return; return;
} }
__intel_pmu_pebs_event(event, iregs, at, top, 0, n, __intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
setup_pebs_fixed_sample_data); setup_pebs_fixed_sample_data);
} }
...@@ -1835,7 +1838,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int ...@@ -1835,7 +1838,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
} }
} }
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds; struct debug_store *ds = cpuc->ds;
...@@ -1942,14 +1945,14 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ...@@ -1942,14 +1945,14 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
} }
if (counts[bit]) { if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base, __intel_pmu_pebs_event(event, iregs, data, base,
top, bit, counts[bit], top, bit, counts[bit],
setup_pebs_fixed_sample_data); setup_pebs_fixed_sample_data);
} }
} }
} }
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs) static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{ {
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
...@@ -1997,7 +2000,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs) ...@@ -1997,7 +2000,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
if (WARN_ON_ONCE(!event->attr.precise_ip)) if (WARN_ON_ONCE(!event->attr.precise_ip))
continue; continue;
__intel_pmu_pebs_event(event, iregs, base, __intel_pmu_pebs_event(event, iregs, data, base,
top, bit, counts[bit], top, bit, counts[bit],
setup_pebs_adaptive_sample_data); setup_pebs_adaptive_sample_data);
} }
......
...@@ -727,7 +727,7 @@ struct x86_pmu { ...@@ -727,7 +727,7 @@ struct x86_pmu {
int pebs_record_size; int pebs_record_size;
int pebs_buffer_size; int pebs_buffer_size;
int max_pebs_events; int max_pebs_events;
void (*drain_pebs)(struct pt_regs *regs); void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
struct event_constraint *pebs_constraints; struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event); void (*pebs_aliases)(struct perf_event *event);
unsigned long large_pebs_flags; unsigned long large_pebs_flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment