Commit aff3d91a authored by Peter Zijlstra, committed by Ingo Molnar

perf, x86: Change x86_pmu.{enable,disable} calling convention

Pass the full perf_event into the x86_pmu functions so that they may
make use of more than the hw_perf_event, and while doing this, remove the
superfluous second argument.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.165166129@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cc2ad4ba
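
Before the diff: a minimal sketch of what the convention change means for one of these callbacks. The two function names here are made up for illustration; the bodies mirror the enable paths in the patch below. Under the old convention the caller had to pass both the embedded hw_perf_event and the counter index; under the new one the callback receives the containing perf_event and derives both itself, since the index already lives in hwc->idx.

/*
 * Hypothetical before/after sketch of the calling-convention change.
 * sketch_enable_old/sketch_enable_new are illustrative names only;
 * the real callbacks are in the diff below.
 */

/* Old convention: caller extracts hwc and idx for the callback. */
static void sketch_enable_old(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

/* New convention: the callback takes the full perf_event, finds hwc
 * via &event->hw, and reads the index from hwc->idx, so the second
 * argument becomes redundant and is dropped. */
static void sketch_enable_new(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

A call site correspondingly shrinks from x86_pmu.enable(hwc, hwc->idx) to x86_pmu.enable(event).
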
@@ -133,8 +133,8 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *);
 	void		(*disable_all)(void);
 	void		(*enable_all)(void);
-	void		(*enable)(struct hw_perf_event *, int);
-	void		(*disable)(struct hw_perf_event *, int);
+	void		(*enable)(struct perf_event *);
+	void		(*disable)(struct perf_event *);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -845,7 +845,7 @@ void hw_perf_enable(void)
 		set_bit(hwc->idx, cpuc->active_mask);
 		cpuc->events[hwc->idx] = event;
 
-		x86_pmu.enable(hwc, hwc->idx);
+		x86_pmu.enable(event);
 		perf_event_update_userpage(event);
 	}
 	cpuc->n_added = 0;
@@ -858,15 +858,16 @@ void hw_perf_enable(void)
 	x86_pmu.enable_all();
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct perf_event *event)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
+	struct hw_perf_event *hwc = &event->hw;
+
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event)
 	return ret;
 }
 
-static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	if (cpuc->enabled)
-		__x86_pmu_enable_event(hwc, idx);
+		__x86_pmu_enable_event(&event->hw);
 }
 
 /*
@@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event)
 
 static int x86_pmu_start(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (hwc->idx == -1)
+	if (event->hw.idx == -1)
 		return -EAGAIN;
 
 	x86_perf_event_set_period(event);
-	x86_pmu.enable(hwc, hwc->idx);
+	x86_pmu.enable(event);
 
 	return 0;
 }
@@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event)
 				cpuc->events[hwc->idx] != event))
 		return;
 
-	x86_pmu.enable(hwc, hwc->idx);
+	x86_pmu.enable(event);
 }
 
 void perf_event_print_debug(void)
@@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active_mask);
-	x86_pmu.disable(hwc, idx);
+	x86_pmu.disable(event);
 
 	/*
 	 * Drain the remaining delta count out of a event
@@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(hwc, idx);
+			x86_pmu.disable(event);
 	}
 
 	if (handled)
...
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
 
 	mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_disable_fixed(hwc, idx);
+		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
-	x86_pmu_disable_event(hwc, idx);
+	x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
 	int err;
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__get_cpu_var(cpu_hw_events).enabled)
 			return;
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc, idx);
+		intel_pmu_enable_fixed(hwc);
 		return;
 	}
 
-	__x86_pmu_enable_event(hwc, idx);
+	__x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -771,7 +775,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(&event->hw, bit);
+			intel_pmu_disable_event(event);
 	}
 
 	intel_pmu_ack_status(ack);
...
@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
 }
 
 static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+p6_pmu_disable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void p6_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val;
 
 	val = hwc->config;
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
 static __initconst struct x86_pmu p6_pmu = {