Commit 57c0c15b authored by Ingo Molnar

perf: Tidy up after the big rename

 - provide compatibility Kconfig entry for existing PERF_COUNTERS .config's

 - provide courtesy copy of old perf_counter.h, for user-space projects

 - small indentation fixups

 - fix up MAINTAINERS

 - fix small x86 printout fallout

 - fix up small PowerPC comment fallout (use 'counter' as in register)
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cdd6c482
@@ -4000,7 +4000,7 @@ S: Maintained
 F: include/linux/delayacct.h
 F: kernel/delayacct.c

-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M: Peter Zijlstra <a.p.zijlstra@chello.nl>
 M: Paul Mackerras <paulus@samba.org>
 M: Ingo Molnar <mingo@elte.hu>
...
@@ -122,7 +122,7 @@ struct paca_struct {
     u8 soft_enabled;          /* irq soft-enable flag */
     u8 hard_enabled;          /* set if irqs are enabled in MSR */
     u8 io_sync;               /* writel() needs spin_unlock sync */
     u8 perf_event_pending;    /* PM interrupt while soft-disabled */

     /* Stuff for accurate time accounting */
     u64 user_time;            /* accumulated usermode TB ticks */
...
@@ -41,7 +41,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 struct power_pmu *ppmu;

 /*
- * Normally, to ignore kernel events we set the FCS (freeze events
+ * Normally, to ignore kernel events we set the FCS (freeze counters
  * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  * hypervisor bit set in the MSR, or if we are running on a processor
  * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
@@ -159,7 +159,7 @@ void perf_event_print_debug(void)
 }

 /*
- * Read one performance monitor event (PMC).
+ * Read one performance monitor counter (PMC).
  */
 static unsigned long read_pmc(int idx)
 {
@@ -409,7 +409,7 @@ static void power_pmu_read(struct perf_event *event)
         val = read_pmc(event->hw.idx);
     } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

-    /* The events are only 32 bits wide */
+    /* The counters are only 32 bits wide */
     delta = (val - prev) & 0xfffffffful;
     atomic64_add(delta, &event->count);
     atomic64_sub(delta, &event->hw.period_left);
@@ -543,7 +543,7 @@ void hw_perf_disable(void)
     }

     /*
-     * Set the 'freeze events' bit.
+     * Set the 'freeze counters' bit.
      * The barrier is to make sure the mtspr has been
      * executed and the PMU has frozen the events
      * before we return.
@@ -1124,7 +1124,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 }

 /*
- * A event has overflowed; update its count and record
+ * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
  */
@@ -1271,7 +1271,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
     /*
      * Reset MMCR0 to its normal value. This will set PMXE and
-     * clear FC (freeze events) and PMAO (perf mon alert occurred)
+     * clear FC (freeze counters) and PMAO (perf mon alert occurred)
      * and thus allow interrupts to occur again.
      * XXX might want to use MSR.PM to keep the events frozen until
      * we get back out of this interrupt.
...
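The power_pmu_read() hunk above accumulates a 64-bit event count from PMCs that are only 32 bits wide: the difference between two reads is masked down to 32 bits, which yields the correct delta even if the counter wrapped in between. A small stand-alone sketch of that arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t prev  = 0xfffffff0ull;   /* PMC value at the previous read, about to wrap */
    uint64_t val   = 0x00000010ull;   /* PMC value now, after wrapping past zero */

    /* Same masking as in power_pmu_read() above: only the low 32 bits matter. */
    uint64_t delta = (val - prev) & 0xfffffffful;

    printf("delta = 0x%llx\n", (unsigned long long)delta);   /* prints 0x20, the true count */
    return 0;
}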
@@ -2081,13 +2081,13 @@ void __init init_hw_perf_events(void)
     perf_events_lapic_init();
     register_die_notifier(&perf_event_nmi_notifier);

     pr_info("... version: %d\n", x86_pmu.version);
     pr_info("... bit width: %d\n", x86_pmu.event_bits);
-    pr_info("... generic events: %d\n", x86_pmu.num_events);
+    pr_info("... generic registers: %d\n", x86_pmu.num_events);
     pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
     pr_info("... max period: %016Lx\n", x86_pmu.max_period);
     pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
     pr_info("... event mask: %016Lx\n", perf_event_mask);
 }

 static inline void x86_pmu_read(struct perf_event *event)
...
 /*
  * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
  * Started by: Thomas Gleixner and Ingo Molnar
  *
  * For licencing details see kernel-base/COPYING
  */
 #ifndef _LINUX_PERF_EVENT_H
 #define _LINUX_PERF_EVENT_H

@@ -131,19 +131,19 @@ enum perf_event_sample_format {
  * as specified by attr.read_format:
  *
  * struct read_format {
  *     { u64 value;
  *       { u64 time_enabled; } && PERF_FORMAT_ENABLED
  *       { u64 time_running; } && PERF_FORMAT_RUNNING
  *       { u64 id;           } && PERF_FORMAT_ID
  *     } && !PERF_FORMAT_GROUP
  *
  *     { u64 nr;
  *       { u64 time_enabled; } && PERF_FORMAT_ENABLED
  *       { u64 time_running; } && PERF_FORMAT_RUNNING
  *       { u64 value;
  *         { u64 id;         } && PERF_FORMAT_ID
  *       } cntr[nr];
  *     } && PERF_FORMAT_GROUP
  *
  * };
  */
 enum perf_event_read_format {
@@ -152,7 +152,7 @@ enum perf_event_read_format {
     PERF_FORMAT_ID = 1U << 2,
     PERF_FORMAT_GROUP = 1U << 3,
     PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
 };

 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -216,8 +216,8 @@ struct perf_event_attr {
  * Ioctls that can be done on a perf event fd:
  */
 #define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
 #define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
 #define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
 #define PERF_EVENT_IOC_RESET _IO ('$', 3)
 #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
@@ -314,9 +314,9 @@ enum perf_event_type {
     /*
      * struct {
      *     struct perf_event_header header;
      *     u64 id;
      *     u64 lost;
      * };
      */
     PERF_RECORD_LOST = 2,

@@ -383,23 +383,23 @@ enum perf_event_type {
  *     { u64 id;       } && PERF_SAMPLE_ID
  *     { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
  *     { u32 cpu, res; } && PERF_SAMPLE_CPU
  *     { u64 period;   } && PERF_SAMPLE_PERIOD
  *
  *     { struct read_format values; } && PERF_SAMPLE_READ
  *
  *     { u64 nr,
  *       u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
  *
  *     #
  *     # The RAW record below is opaque data wrt the ABI
  *     #
  *     # That is, the ABI doesn't make any promises wrt to
  *     # the stability of its content, it may vary depending
- *     # on event_id, hardware, kernel version and phase of
+ *     # on event, hardware, kernel version and phase of
  *     # the moon.
  *     #
  *     # In other words, PERF_SAMPLE_RAW contents are not an ABI.
  *     #
  *
  *     { u32 size;
  *       char data[size];}&& PERF_SAMPLE_RAW
@@ -503,10 +503,10 @@ struct pmu {
  * enum perf_event_active_state - the states of a event
  */
 enum perf_event_active_state {
     PERF_EVENT_STATE_ERROR = -2,
     PERF_EVENT_STATE_OFF = -1,
     PERF_EVENT_STATE_INACTIVE = 0,
     PERF_EVENT_STATE_ACTIVE = 1,
 };

 struct file;
@@ -529,7 +529,7 @@ struct perf_mmap_data {
     long watermark; /* wakeup watermark */
     struct perf_event_mmap_page *user_page;
     void *data_pages[0];
 };
@@ -694,14 +694,14 @@ struct perf_cpu_context {
 };

 struct perf_output_handle {
     struct perf_event *event;
     struct perf_mmap_data *data;
     unsigned long head;
     unsigned long offset;
     int nmi;
     int sample;
     int locked;
     unsigned long flags;
 };

 #ifdef CONFIG_PERF_EVENTS
@@ -829,22 +829,22 @@ static inline void
 perf_event_task_sched_out(struct task_struct *task,
                           struct task_struct *next, int cpu) { }
 static inline void
 perf_event_task_tick(struct task_struct *task, int cpu) { }
 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
 static inline void perf_event_do_pending(void) { }
 static inline void perf_event_print_debug(void) { }
 static inline void perf_disable(void) { }
 static inline void perf_enable(void) { }
 static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void) { return -EINVAL; }

 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
               struct pt_regs *regs, u64 addr) { }

 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
 static inline void perf_event_comm(struct task_struct *tsk) { }
 static inline void perf_event_fork(struct task_struct *tsk) { }
 static inline void perf_event_init(void) { }
...
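The read_format layout and the PERF_EVENT_IOC_* ioctls documented above are the user-space view of a perf event fd. Below is a minimal user-space sketch of reading one non-group event, assuming a kernel and headers that already carry the renamed perf_event ABI; the PERF_FORMAT_ENABLED/RUNNING shorthand in the comment corresponds to the PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING flags, and the hardware event plus busy loop are arbitrary choices for illustration:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

/* Layout returned by read() for a non-group event, per the read_format
 * comment above: value, then time_enabled, time_running and id. */
struct read_data {
    uint64_t value;
    uint64_t time_enabled;
    uint64_t time_running;
    uint64_t id;
};

int main(void)
{
    struct perf_event_attr attr;
    struct read_data rd;
    volatile long long dummy = 0;
    long long i;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled = 1;
    attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                       PERF_FORMAT_TOTAL_TIME_RUNNING |
                       PERF_FORMAT_ID;

    /* There is no glibc wrapper; invoke the syscall directly
     * (current task, any CPU, no group fd, no flags). */
    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

    for (i = 0; i < 1000000; i++)      /* something to measure */
        dummy += i;

    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &rd, sizeof(rd)) == sizeof(rd))
        printf("instructions: %llu (enabled %llu ns, running %llu ns, id %llu)\n",
               (unsigned long long)rd.value,
               (unsigned long long)rd.time_enabled,
               (unsigned long long)rd.time_running,
               (unsigned long long)rd.id);

    close(fd);
    return 0;
}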
@@ -920,26 +920,31 @@ config HAVE_PERF_EVENTS
     help
       See tools/perf/design.txt for details.

-menu "Performance Counters"
+menu "Kernel Performance Events And Counters"

 config PERF_EVENTS
-    bool "Kernel Performance Counters"
-    default y if PROFILING
+    bool "Kernel performance events and counters"
+    default y if (PROFILING || PERF_COUNTERS)
     depends on HAVE_PERF_EVENTS
     select ANON_INODES
     help
-      Enable kernel support for performance counter hardware.
+      Enable kernel support for various performance events provided
+      by software and hardware.

-      Performance counters are special hardware registers available
-      on most modern CPUs. These registers count the number of certain
+      Software events are supported either built-in or via the
+      use of generic tracepoints.
+
+      Most modern CPUs support performance events via performance
+      counter registers. These registers count the number of certain
       types of hw events: such as instructions executed, cachemisses
       suffered, or branches mis-predicted - without slowing down the
       kernel or applications. These registers can also trigger interrupts
       when a threshold number of events have passed - and can thus be
       used to profile the code that runs on that CPU.

-      The Linux Performance Counter subsystem provides an abstraction of
-      these hardware capabilities, available via a system call. It
+      The Linux Performance Event subsystem provides an abstraction of
+      these software and hardware event capabilities, available via a
+      system call and used by the "perf" utility in tools/perf/. It
       provides per task and per CPU counters, and it provides event
       capabilities on top of those.
@@ -950,14 +955,26 @@ config EVENT_PROFILE
     depends on PERF_EVENTS && EVENT_TRACING
     default y
     help
-      Allow the use of tracepoints as software performance counters.
+      Allow the use of tracepoints as software performance events.

-      When this is enabled, you can create perf counters based on
+      When this is enabled, you can create perf events based on
       tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
       found in debugfs://tracing/events/*/*/id. (The -e/--events
       option to the perf tool can parse and interpret symbolic
       tracepoints, in the subsystem:tracepoint_name format.)

+config PERF_COUNTERS
+    bool "Kernel performance counters (old config option)"
+    depends on HAVE_PERF_EVENTS
+    help
+      This config has been obsoleted by the PERF_EVENTS
+      config option - please see that one for details.
+
+      It has no effect on the kernel whether you enable
+      it or not, it is a compatibility placeholder.
+
+      Say N if unsure.
+
 endmenu

 config VM_EVENT_COUNTERS
...
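The EVENT_PROFILE help text above describes the tracepoint path: read a tracepoint's ID out of debugfs and pass it as the config value of a PERF_TYPE_TRACEPOINT event. A minimal sketch of that flow, assuming debugfs is mounted at /sys/kernel/debug and using the sched_switch tracepoint purely as an example (opening tracepoint events typically requires root):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    /* Tracepoint ID exported in debugfs; the path and tracepoint are examples only. */
    const char *id_path =
        "/sys/kernel/debug/tracing/events/sched/sched_switch/id";
    struct perf_event_attr attr;
    char buf[32];
    uint64_t count;
    FILE *f;
    int fd;

    f = fopen(id_path, "r");
    if (!f || !fgets(buf, sizeof(buf), f)) {
        perror("read tracepoint id");
        return 1;
    }
    fclose(f);

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_TRACEPOINT;
    attr.config = strtoull(buf, NULL, 10);   /* the tracepoint ID selects the event */
    attr.disabled = 1;

    /* Count sched_switch events for this task on any CPU. */
    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    usleep(100000);                           /* let a few context switches happen */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("sched_switch events: %llu\n", (unsigned long long)count);

    close(fd);
    return 0;
}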
 /*
- * Performance event core code
+ * Performance events core code:
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
  */

 #include <linux/fs.h>
...