Commit 1c432d89 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Rename enums

Rename the perf enums to be in the 'perf_' namespace and strictly
enumerate the ABI bits.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 729ff5e2
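
As context for what "strictly enumerate the ABI bits" means in practice: every enumerator that user space may rely on now carries an explicit value, while the trailing *_MAX entry is marked "non ABI" because its value shifts whenever a new id is added, so it is only useful as an in-kernel (or in-tool) sentinel. A minimal userspace-style sketch, reusing enum perf_type_id from the patch below; the perf_type_name() helper and its bounds check are hypothetical illustrations, not part of the patch:

#include <stdio.h>

/* Type ids as renamed by the patch; the explicit values are the ABI. */
enum perf_type_id {
	PERF_TYPE_HARDWARE	= 0,
	PERF_TYPE_SOFTWARE	= 1,
	PERF_TYPE_TRACEPOINT	= 2,
	PERF_TYPE_HW_CACHE	= 3,
	PERF_TYPE_RAW		= 4,

	PERF_TYPE_MAX,		/* non ABI: sentinel, moves when new types appear */
};

/* Hypothetical helper: PERF_TYPE_MAX sizes the table and bounds the lookup,
 * but its numeric value must never be assumed stable by user space. */
static const char *perf_type_name(enum perf_type_id type)
{
	static const char * const names[PERF_TYPE_MAX] = {
		[PERF_TYPE_HARDWARE]	= "hardware",
		[PERF_TYPE_SOFTWARE]	= "software",
		[PERF_TYPE_TRACEPOINT]	= "tracepoint",
		[PERF_TYPE_HW_CACHE]	= "hw-cache",
		[PERF_TYPE_RAW]		= "raw",
	};

	if ((int)type < 0 || type >= PERF_TYPE_MAX)
		return "unknown";
	return names[type];
}

int main(void)
{
	printf("%d -> %s\n", PERF_TYPE_HW_CACHE, perf_type_name(PERF_TYPE_HW_CACHE));
	return 0;
}
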
@@ -24,24 +24,21 @@
 /*
  * attr.type
  */
-enum perf_event_types {
+enum perf_type_id {
 	PERF_TYPE_HARDWARE		= 0,
 	PERF_TYPE_SOFTWARE		= 1,
 	PERF_TYPE_TRACEPOINT		= 2,
 	PERF_TYPE_HW_CACHE		= 3,
+	PERF_TYPE_RAW			= 4,
 
-	/*
-	 * available TYPE space, raw is the max value.
-	 */
-
-	PERF_TYPE_RAW			= 128,
+	PERF_TYPE_MAX,			/* non ABI */
 };
 
 /*
  * Generalized performance counter event types, used by the attr.event_id
  * parameter of the sys_perf_counter_open() syscall:
  */
-enum attr_ids {
+enum perf_hw_id {
 	/*
 	 * Common hardware events, generalized by the kernel:
 	 */
@@ -53,7 +50,7 @@ enum attr_ids {
 	PERF_COUNT_BRANCH_MISSES	= 5,
 	PERF_COUNT_BUS_CYCLES		= 6,
 
-	PERF_HW_EVENTS_MAX		= 7,
+	PERF_HW_EVENTS_MAX,		/* non ABI */
 };
 
 /*
@@ -63,30 +60,30 @@ enum attr_ids {
  *       { read, write, prefetch } x
  *       { accesses, misses }
  */
-enum hw_cache_id {
-	PERF_COUNT_HW_CACHE_L1D,
-	PERF_COUNT_HW_CACHE_L1I,
-	PERF_COUNT_HW_CACHE_L2,
-	PERF_COUNT_HW_CACHE_DTLB,
-	PERF_COUNT_HW_CACHE_ITLB,
-	PERF_COUNT_HW_CACHE_BPU,
+enum perf_hw_cache_id {
+	PERF_COUNT_HW_CACHE_L1D		= 0,
+	PERF_COUNT_HW_CACHE_L1I		= 1,
+	PERF_COUNT_HW_CACHE_L2		= 2,
+	PERF_COUNT_HW_CACHE_DTLB	= 3,
+	PERF_COUNT_HW_CACHE_ITLB	= 4,
+	PERF_COUNT_HW_CACHE_BPU		= 5,
 
-	PERF_COUNT_HW_CACHE_MAX,
+	PERF_COUNT_HW_CACHE_MAX,	/* non ABI */
 };
 
-enum hw_cache_op_id {
-	PERF_COUNT_HW_CACHE_OP_READ,
-	PERF_COUNT_HW_CACHE_OP_WRITE,
-	PERF_COUNT_HW_CACHE_OP_PREFETCH,
+enum perf_hw_cache_op_id {
+	PERF_COUNT_HW_CACHE_OP_READ	= 0,
+	PERF_COUNT_HW_CACHE_OP_WRITE	= 1,
+	PERF_COUNT_HW_CACHE_OP_PREFETCH	= 2,
 
-	PERF_COUNT_HW_CACHE_OP_MAX,
+	PERF_COUNT_HW_CACHE_OP_MAX,	/* non ABI */
 };
 
-enum hw_cache_op_result_id {
-	PERF_COUNT_HW_CACHE_RESULT_ACCESS,
-	PERF_COUNT_HW_CACHE_RESULT_MISS,
+enum perf_hw_cache_op_result_id {
+	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
+	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
 
-	PERF_COUNT_HW_CACHE_RESULT_MAX,
+	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non ABI */
 };
 
 /*
@@ -95,7 +92,7 @@ enum hw_cache_op_result_id {
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
-enum sw_event_ids {
+enum perf_sw_ids {
 	PERF_COUNT_CPU_CLOCK		= 0,
 	PERF_COUNT_TASK_CLOCK		= 1,
 	PERF_COUNT_PAGE_FAULTS		= 2,
@@ -104,7 +101,7 @@ enum sw_event_ids {
 	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
 	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,
 
-	PERF_SW_EVENTS_MAX		= 7,
+	PERF_SW_EVENTS_MAX,		/* non ABI */
 };
 
 /*
@@ -3162,7 +3162,7 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
-				enum perf_event_types type,
+				enum perf_type_id type,
 				u32 event, struct pt_regs *regs)
 {
 	if (!perf_swcounter_is_counting(counter))
@@ -3194,7 +3194,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-				     enum perf_event_types type, u32 event,
+				     enum perf_type_id type, u32 event,
 				     u64 nr, int nmi, struct pt_regs *regs,
 				     u64 addr)
 {
@@ -3225,7 +3225,7 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
 	return &cpuctx->recursion[0];
 }
 
-static void __perf_swcounter_event(enum perf_event_types type, u32 event,
+static void __perf_swcounter_event(enum perf_type_id type, u32 event,
 				   u64 nr, int nmi, struct pt_regs *regs,
 				   u64 addr)
 {
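
The three perf_hw_cache_* enums above describe a generalized cache event as the { cache } x { op } x { result } cross product mentioned in the header comment, which is why each dimension gets small, fixed values plus a non-ABI max. A hedged sketch of how a tool could pack such a tuple into a single config word, one byte per dimension; the byte layout is an assumption made for illustration and is not something this patch defines:

#include <stdint.h>
#include <stdio.h>

/* A few ids from the renamed enums above (values fixed by the patch). */
enum perf_hw_cache_id		{ PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_DTLB = 3 };
enum perf_hw_cache_op_id	{ PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1 };
enum perf_hw_cache_op_result_id	{ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1 };

/* Hypothetical packing of the (cache, op, result) tuple. */
static uint64_t hw_cache_config(enum perf_hw_cache_id id,
				enum perf_hw_cache_op_id op,
				enum perf_hw_cache_op_result_id result)
{
	return (uint64_t)id | ((uint64_t)op << 8) | ((uint64_t)result << 16);
}

int main(void)
{
	/* "L1-D read misses" expressed as one config value. */
	uint64_t cfg = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
				       PERF_COUNT_HW_CACHE_OP_READ,
				       PERF_COUNT_HW_CACHE_RESULT_MISS);

	printf("config = 0x%llx\n", (unsigned long long)cfg);
	return 0;
}
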