Commit ee06094f authored by Ingo Molnar

perfcounters: restructure x86 counter math

Impact: restructure code

Change counter math from absolute values to clear delta logic.

We extract elapsed deltas from the raw hw counter and accumulate them
into the generic counter (sketched below the commit metadata).
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9b194e83
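
As a rough illustration of that delta logic (a stand-alone sketch only: struct sim_counter, sim_counter_update() and read_raw_hw_counter() are invented names, and C11 atomics stand in for the kernel's atomic64_t / atomic64_cmpxchg()), each update re-reads the raw hardware value, atomically advances the saved prev_count baseline, and adds only the elapsed delta to the generic 64-bit count:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sim_counter {
	_Atomic uint64_t prev_count;	/* last raw hardware value observed */
	_Atomic uint64_t count;		/* accumulated deltas: the "generic" count */
};

/* Hypothetical stand-in for reading the raw hardware counter (e.g. RDPMC). */
static uint64_t read_raw_hw_counter(void)
{
	static uint64_t fake;
	return fake += 12345;		/* pretend the hardware keeps counting */
}

static void sim_counter_update(struct sim_counter *c)
{
	uint64_t prev, new_raw;

	/*
	 * Re-read the baseline and the raw counter until the new baseline
	 * is published atomically, so a concurrent update (an NMI in the
	 * kernel case) is never lost.
	 */
	do {
		prev = atomic_load(&c->prev_count);
		new_raw = read_raw_hw_counter();
	} while (!atomic_compare_exchange_strong(&c->prev_count, &prev, new_raw));

	/* Only the elapsed delta is folded into the generic counter. */
	atomic_fetch_add(&c->count, new_raw - prev);
}

int main(void)
{
	struct sim_counter c;

	atomic_init(&c.prev_count, read_raw_hw_counter());	/* initial baseline */
	atomic_init(&c.count, 0);

	sim_counter_update(&c);
	sim_counter_update(&c);

	printf("accumulated events: %llu\n",
	       (unsigned long long)atomic_load(&c.count));
	return 0;
}

Keeping prev_count as an atomic 64-bit baseline is what lets the generic read path in the diff below simply return atomic64_read(&counter->count) and drop the old 32-bit count32[] splitting.
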
@@ -643,7 +643,7 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
 	def_bool y
 	depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 
 config X86_IO_APIC
 	def_bool y
[This diff is collapsed and not shown.]
@@ -91,14 +91,16 @@ struct perf_counter_hw_event {
  * struct hw_perf_counter - performance counter hardware details:
  */
 struct hw_perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
 	u64				config;
 	unsigned long			config_base;
 	unsigned long			counter_base;
 	int				nmi;
 	unsigned int			idx;
-	u64				prev_count;
+	atomic64_t			prev_count;
 	u64				irq_period;
-	s32				next_count;
+	atomic64_t			period_left;
+#endif
 };
 
 /*
@@ -140,17 +142,15 @@ enum perf_counter_active_state {
  * struct perf_counter - performance counter kernel representation:
  */
 struct perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
 	struct list_head		list_entry;
 	struct list_head		sibling_list;
 	struct perf_counter		*group_leader;
 	const struct hw_perf_counter_ops *hw_ops;
 
 	enum perf_counter_active_state	state;
-#if BITS_PER_LONG == 64
 	atomic64_t			count;
-#else
-	atomic_t			count32[2];
-#endif
 
 	struct perf_counter_hw_event	hw_event;
 	struct hw_perf_counter		hw;
@@ -172,6 +172,7 @@ struct perf_counter {
 	struct perf_data		*irqdata;
 	struct perf_data		*usrdata;
 	struct perf_data		data[2];
+#endif
 };
 
 /**
@@ -220,8 +221,6 @@ extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
 extern u64 hw_perf_save_disable(void);
 extern void hw_perf_restore(u64 ctrl);
-extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
-extern u64 atomic64_counter_read(struct perf_counter *counter);
 extern int perf_counter_task_disable(void);
 extern int perf_counter_task_enable(void);
@@ -47,64 +47,6 @@ u64 __weak hw_perf_save_disable(void) { return 0; }
 void __weak hw_perf_restore(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
-#if BITS_PER_LONG == 64
-
-/*
- * Read the cached counter in counter safe against cross CPU / NMI
- * modifications. 64 bit version - no complications.
- */
-static inline u64 perf_counter_read_safe(struct perf_counter *counter)
-{
-	return (u64) atomic64_read(&counter->count);
-}
-
-void atomic64_counter_set(struct perf_counter *counter, u64 val)
-{
-	atomic64_set(&counter->count, val);
-}
-
-u64 atomic64_counter_read(struct perf_counter *counter)
-{
-	return atomic64_read(&counter->count);
-}
-
-#else
-
-/*
- * Read the cached counter in counter safe against cross CPU / NMI
- * modifications. 32 bit version.
- */
-static u64 perf_counter_read_safe(struct perf_counter *counter)
-{
-	u32 cntl, cnth;
-
-	local_irq_disable();
-	do {
-		cnth = atomic_read(&counter->count32[1]);
-		cntl = atomic_read(&counter->count32[0]);
-	} while (cnth != atomic_read(&counter->count32[1]));
-
-	local_irq_enable();
-
-	return cntl | ((u64) cnth) << 32;
-}
-
-void atomic64_counter_set(struct perf_counter *counter, u64 val64)
-{
-	u32 *val32 = (void *)&val64;
-
-	atomic_set(counter->count32 + 0, *(val32 + 0));
-	atomic_set(counter->count32 + 1, *(val32 + 1));
-}
-
-u64 atomic64_counter_read(struct perf_counter *counter)
-{
-	return atomic_read(counter->count32 + 0) |
-		(u64) atomic_read(counter->count32 + 1) << 32;
-}
-
-#endif
-
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
@@ -280,11 +222,11 @@ static void __perf_install_in_context(void *info)
 	ctx->nr_counters++;
 
 	if (cpuctx->active_oncpu < perf_max_counters) {
-		counter->hw_ops->hw_perf_counter_enable(counter);
 		counter->state = PERF_COUNTER_STATE_ACTIVE;
 		counter->oncpu = cpu;
 		ctx->nr_active++;
 		cpuctx->active_oncpu++;
+		counter->hw_ops->hw_perf_counter_enable(counter);
 	}
 
 	if (!ctx->task && cpuctx->max_pertask)
@@ -624,7 +566,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
 					 __hw_perf_counter_read, counter, 1);
 	}
 
-	return perf_counter_read_safe(counter);
+	return atomic64_read(&counter->count);
 }
 
 /*
@@ -921,7 +863,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 {
 	int cpu = raw_smp_processor_id();
 
-	atomic64_counter_set(counter, cpu_clock(cpu));
+	atomic64_set(&counter->count, cpu_clock(cpu));
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
@@ -940,7 +882,7 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter)
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	atomic64_counter_set(counter, current->se.sum_exec_runtime);
+	atomic64_set(&counter->count, current->se.sum_exec_runtime);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {