Commit 184f412c authored by Ingo Molnar

perf, x86: Clean up event constraints code a bit

- Remove stray debug code
- Improve ugly macros a bit
- Remove some whitespace damage
- (Also fix up some accumulated damage in perf_event.h)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephane Eranian <eranian@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
parent 6c9687ab
arch/x86/kernel/cpu/perf_event.c

@@ -93,24 +93,19 @@ struct cpu_hw_events {
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 };
 
 #define EVENT_CONSTRAINT(c, n, m) {	\
 	{ .idxmsk64[0] = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = HWEIGHT64((u64)(n)),	\
 }
 
-#define INTEL_EVENT_CONSTRAINT(c, n) \
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
 
-#define FIXED_EVENT_CONSTRAINT(c, n) \
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+#define EVENT_CONSTRAINT_END		EVENT_CONSTRAINT(0, 0, 0)
 
-#define EVENT_CONSTRAINT_END \
-	EVENT_CONSTRAINT(0, 0, 0)
-
-#define for_each_event_constraint(e, c) \
-	for ((e) = (c); (e)->cmask; (e)++)
+#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)
 
 /*
  * struct x86_pmu - generic x86 pmu
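For readers skimming the hunk above: EVENT_CONSTRAINT builds a struct event_constraint whose weight is the population count of the counter bitmask, and for_each_event_constraint walks a table until it hits the terminating entry whose cmask is 0. Here is a minimal standalone sketch of the same idea with simplified userspace types: the idxmsk64 array is flattened to a scalar, HWEIGHT64 is emulated with __builtin_popcountll, and the table values are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct event_constraint. */
    struct event_constraint {
            uint64_t idxmsk64;  /* bitmask of counters the event may use */
            uint64_t code;      /* event code to match against */
            uint64_t cmask;     /* which config bits participate in the match */
            int      weight;    /* how many counters can host the event */
    };

    /* The kernel's HWEIGHT64 is a population count; emulate it here. */
    #define HWEIGHT64(n)    __builtin_popcountll(n)

    #define EVENT_CONSTRAINT(c, n, m) {     \
            .idxmsk64 = (n),                \
            .code = (c),                    \
            .cmask = (m),                   \
            .weight = HWEIGHT64((uint64_t)(n)), \
    }

    #define EVENT_CONSTRAINT_END    EVENT_CONSTRAINT(0, 0, 0)

    #define for_each_event_constraint(e, c) \
            for ((e) = (c); (e)->cmask; (e)++)

    int main(void)
    {
            /* Invented example: event 0x3c restricted to counters 0 and 1. */
            static const struct event_constraint table[] = {
                    EVENT_CONSTRAINT(0x3c, 0x3, 0xff),
                    EVENT_CONSTRAINT_END    /* cmask == 0 ends the walk */
            };
            const struct event_constraint *e;

            for_each_event_constraint(e, table)
                    printf("code=%#jx weight=%d\n", (uintmax_t)e->code, e->weight);
            return 0;
    }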
@@ -1276,14 +1271,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (test_bit(hwc->idx, used_mask))
 			break;
 
-#if 0
-		pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
-			 smp_processor_id(),
-			 hwc->config,
-			 hwc->idx,
-			 assign ? 'y' : 'n');
-#endif
-
 		set_bit(hwc->idx, used_mask);
 		if (assign)
 			assign[i] = hwc->idx;
@@ -1333,14 +1320,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (j == X86_PMC_IDX_MAX)
 			break;
 
-#if 0
-		pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
-			 smp_processor_id(),
-			 hwc->config,
-			 j,
-			 assign ? 'y' : 'n');
-#endif
-
 		set_bit(j, used_mask);
 
 		if (assign)
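Both deleted blocks were #if 0'd tracing left over from developing x86_schedule_events(), which claims counters by marking bits in used_mask. As a rough self-contained illustration of the surrounding fast-path logic, here is a userspace sketch; the names and the four-counter setup are invented, and a plain uint64_t stands in for the kernel's bitmap and its test_bit()/set_bit() helpers.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_COUNTERS 4          /* invented PMU width */

    /* Each event remembers the counter it was on last time (-1 if none). */
    struct hw_event {
            int idx;
    };

    /*
     * Rough analogue of the fast path patched above: keep every event
     * on its previous counter if that counter is still free, claiming
     * counters by setting bits in used_mask. Returns how many events
     * were placed; on a conflict the kernel falls back to a full
     * reassignment pass (the "slow" path in the second hunk).
     */
    static int assign_fast(struct hw_event *events, int n, int *assign)
    {
            uint64_t used_mask = 0;
            int i;

            for (i = 0; i < n; i++) {
                    int idx = events[i].idx;

                    if (idx < 0 || idx >= NUM_COUNTERS ||
                        (used_mask & (1ULL << idx)))
                            break;                  /* conflict: give up */

                    used_mask |= 1ULL << idx;       /* set_bit() */
                    if (assign)                     /* NULL = dry run */
                            assign[i] = idx;
            }
            return i;
    }

    int main(void)
    {
            struct hw_event ev[] = { { .idx = 0 }, { .idx = 2 } };
            int assign[2];
            int placed = assign_fast(ev, 2, assign);

            printf("placed %d of 2 events, first on counter %d\n",
                   placed, assign[0]);
            return 0;
    }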
@@ -2596,9 +2575,9 @@ static const struct pmu pmu = {
  * validate a single event group
  *
  * validation include:
- *      - check events are compatible which each other
- *      - events do not compete for the same counter
- *      - number of events <= number of counters
+ *	- check events are compatible which each other
+ *	- events do not compete for the same counter
+ *	- number of events <= number of counters
  *
  * validation ensures the group can be loaded onto the
  * PMU if it was the only group available.
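One detail worth noting from the scheduling hunks above: the assign array is optional (every store is guarded by "if (assign)"), so the same routine can run as a pure feasibility check. That is essentially what group validation amounts to: try to schedule the group on an otherwise empty PMU and discard the placement. A hypothetical helper in that spirit, reusing assign_fast() and NUM_COUNTERS from the previous sketch (not the kernel's actual validate_group):

    /*
     * Hypothetical validation helper mirroring the comment above: the
     * group is acceptable iff every event can be placed while the PMU
     * is otherwise idle. Passing NULL skips recording the placement.
     */
    static int validate_group_sketch(struct hw_event *group, int n)
    {
            if (n > NUM_COUNTERS)   /* more events than counters */
                    return -1;
            return assign_fast(group, n, NULL) == n ? 0 : -1;
    }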
include/linux/perf_event.h
@@ -290,7 +290,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
-#define PERF_RECORD_MISC_KERNEL		(1 << 0)
+#define PERF_RECORD_MISC_KERNEL			(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
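These values live in the low two bits of the misc field of struct perf_event_header, which is why they are all written as shifts of 0: they are alternatives sharing one two-bit field, not independent flags, so a consumer must mask before comparing. A small standalone sketch of the decode (the header layout matches this file; the sample value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define PERF_RECORD_MISC_CPUMODE_MASK           (3 << 0)
    #define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
    #define PERF_RECORD_MISC_KERNEL                 (1 << 0)
    #define PERF_RECORD_MISC_USER                   (2 << 0)
    #define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)

    /* Layout as defined in include/linux/perf_event.h. */
    struct perf_event_header {
            uint32_t type;
            uint16_t misc;
            uint16_t size;
    };

    /* Mask first: the remaining misc bits carry unrelated flags. */
    static const char *cpumode(const struct perf_event_header *hdr)
    {
            switch (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) {
            case PERF_RECORD_MISC_KERNEL:           return "kernel";
            case PERF_RECORD_MISC_USER:             return "user";
            case PERF_RECORD_MISC_HYPERVISOR:       return "hypervisor";
            default:                                return "unknown";
            }
    }

    int main(void)
    {
            struct perf_event_header hdr = { .misc = PERF_RECORD_MISC_USER };
            printf("%s\n", cpumode(&hdr));
            return 0;
    }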
@@ -356,8 +356,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
-	PERF_RECORD_THROTTLE		= 5,
-	PERF_RECORD_UNTHROTTLE		= 6,
+	PERF_RECORD_THROTTLE			= 5,
+	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -371,10 +371,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
-	 *	u32			pid, tid;
+	 *	u32				pid, tid;
 	 *
-	 *	struct read_format	values;
+	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
@@ -412,7 +412,7 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_RECORD_SAMPLE		= 9,
+	PERF_RECORD_SAMPLE			= 9,
 
 	PERF_RECORD_MAX,			/* non-ABI */
 };
@@ -752,8 +752,7 @@ extern int perf_max_events;
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
 extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task,
-				      struct task_struct *next);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
@@ -853,8 +852,7 @@ extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-			  void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -895,13 +893,13 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 		     struct pt_regs *regs, u64 addr)			{ }
 static inline void
-perf_bp_event(struct perf_event *event, void *data)		{ }
+perf_bp_event(struct perf_event *event, void *data)			{ }
 
-static inline void perf_event_mmap(struct vm_area_struct *vma)	{ }
+static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
 static inline void perf_event_init(void)				{ }
-static inline int  perf_swevent_get_recursion_context(void)	{ return -1; }
+static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)		{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
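The stubs being realigned here follow the usual kernel pattern for configured-out subsystems: with CONFIG_PERF_EVENTS disabled, each entry point collapses to an empty static inline (or returns a harmless error such as -1), so call sites compile unchanged and cost nothing. A generic sketch of the idiom, with invented CONFIG_FEATURE_X names rather than the real perf symbols:

    /* Invented names; only the idiom matches the header above. */
    #ifdef CONFIG_FEATURE_X
    extern void feature_event(int id);      /* real implementation elsewhere */
    extern int  feature_get_context(void);
    #else
    static inline void feature_event(int id)        { }
    static inline int  feature_get_context(void)    { return -1; }
    #endif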