Commit 667832da authored by Russell King


Merge branch 'perf/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
parents 3587b1b0 2ac29a14
@@ -21,4 +21,9 @@
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif /* __ARM_PERF_EVENT_H__ */
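The self-referential define above is the usual kernel idiom for overriding a generic definition: because the macro name is now defined, generic perf code can detect the architecture override with the preprocessor. A sketch of the generic side (paraphrased from include/linux/perf_event.h, not part of this diff):

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
	(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif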
@@ -67,19 +67,19 @@ struct arm_pmu {
cpumask_t active_irqs;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct hw_perf_event *hwc);
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(int idx);
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
u32 (*read_counter)(struct perf_event *event);
void (*write_counter)(struct perf_event *event, u32 val);
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*request_irq)(irq_handler_t handler);
void (*free_irq)(void);
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
atomic_t active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
extern const struct dev_pm_ops armpmu_dev_pm_ops;
int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
int armpmu_register(struct arm_pmu *armpmu, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx);
u64 armpmu_event_update(struct perf_event *event);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx);
int armpmu_event_set_period(struct perf_event *event);
int armpmu_map_event(struct perf_event *event,
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
......
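Taken together, these prototype changes move the ARM PMU callbacks from (hwc, idx) pairs to the owning perf_event, from which everything else can be derived. A minimal sketch of the new calling convention, using the to_arm_pmu() helper that appears later in this diff (the callback name here is hypothetical):

static void example_enable(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu); /* container_of() on event->pmu */
	struct hw_perf_event *hwc = &event->hw;          /* embedded hardware state */
	int idx = hwc->idx;                              /* counter index, as before */

	/* ... program counter 'idx' on 'armpmu' ... */
}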
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
return -ENOENT;
}
int
armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
int armpmu_event_set_period(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
local64_set(&hwc->prev_count, (u64)-left);
armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
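The (u64)(-left) preload is the standard trick for period handling on up-counters: writing the two's complement of the remaining period makes the counter overflow, and hence interrupt, after exactly 'left' events. A standalone illustration of the arithmetic (hypothetical helper, not from this diff):

static inline u32 period_preload(s64 left)
{
	/* e.g. left = 1000 -> 0xfffffc18; 1000 increments later the
	 * counter wraps 0xffffffff -> 0 and raises the overflow IRQ. */
	return (u64)(-left) & 0xffffffff;
}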
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
u64 armpmu_event_update(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = armpmu->read_counter(idx);
new_raw_count = armpmu->read_counter(event);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
armpmu_event_update(event, hwc, hwc->idx);
armpmu_event_update(event);
}
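The hunk above cuts off at the cmpxchg retry loop; the tail of armpmu_event_update() is elided by the diff. For orientation, the usual shape of the rest of such an update (an assumption about the elided context, not text from this commit):

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
	return new_raw_count;

Masking with max_period keeps the delta correct when a narrow hardware counter wraps between reads.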
static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
* PERF_EF_UPDATE, see comments in armpmu_start().
*/
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
armpmu_event_update(event, hwc, hwc->idx);
armpmu->disable(event);
armpmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static void
armpmu_start(struct perf_event *event, int flags)
static void armpmu_start(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
*/
armpmu_event_set_period(event, hwc, hwc->idx);
armpmu->enable(hwc, hwc->idx);
armpmu_event_set_period(event);
armpmu->enable(event);
}
static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, hwc);
idx = armpmu->get_event_idx(hw_events, event);
if (idx < 0) {
err = idx;
goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
* sure it is disabled.
*/
event->hw.idx = idx;
armpmu->disable(hwc, idx);
armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
armpmu->free_irq();
armpmu->free_irq(armpmu);
pm_runtime_put_sync(&armpmu->plat_device->dev);
}
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return -ENODEV;
pm_runtime_get_sync(&pmu_device->dev);
err = armpmu->request_irq(armpmu_dispatch_irq);
err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
armpmu_release_hardware(armpmu);
return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
armpmu->start();
armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
armpmu->stop();
armpmu->stop(armpmu);
}
#ifdef CONFIG_PM_RUNTIME
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu)
};
}
int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
int armpmu_register(struct arm_pmu *armpmu, int type)
{
armpmu_init(armpmu);
pm_runtime_enable(&armpmu->plat_device->dev);
pr_info("enabled with %s PMU driver, %d counters available\n",
armpmu->name, armpmu->num_events);
return perf_pmu_register(&armpmu->pmu, name, type);
return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
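With the name now carried in struct arm_pmu, a hypothetical caller looks like this (sketch; names are illustrative):

static int __devinit example_pmu_init(struct arm_pmu *pmu)
{
	pmu->name = "example";
	/* ... fill in the callback table, num_events, max_period ... */
	return 0;
}

/* later, after a successful init: */
armpmu_register(pmu, PERF_TYPE_RAW);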
/*
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct frame_tail __user *tail;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest OS callchains yet. */
return;
}
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stackframe fr;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest OS callchains yet. */
return;
}
fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr;
fr.pc = regs->ARM_pc;
walk_stackframe(&fr, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();
return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
return misc;
}
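These two hooks are consumed by the generic perf core when it prepares sample records, roughly (paraphrased, not part of this diff):

	header->misc |= perf_misc_flags(regs);      /* tag user/kernel/guest */
	data->ip = perf_instruction_pointer(regs);  /* possibly the guest IP */

so samples taken while a KVM guest is running are attributed to the guest rather than to the host kernel.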
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/cputype.h>
@@ -45,7 +46,7 @@ const char *perf_pmu_name(void)
if (!cpu_pmu)
return NULL;
return cpu_pmu->pmu.name;
return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
@@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
return &__get_cpu_var(cpu_hw_events);
}
static void cpu_pmu_free_irq(void)
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -86,7 +87,7 @@
}
}
static int cpu_pmu_request_irq(irq_handler_t handler)
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu && cpu_pmu->reset)
on_each_cpu(cpu_pmu->reset, NULL, 1);
on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
/*
@@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
return NOTIFY_DONE;
if (cpu_pmu && cpu_pmu->reset)
cpu_pmu->reset(NULL);
cpu_pmu->reset(cpu_pmu);
else
return NOTIFY_DONE;
return NOTIFY_OK;
}
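Passing cpu_pmu instead of NULL means reset callbacks can now locate their PMU state through the opaque argument, whether invoked directly as above or via on_each_cpu(). A sketch of a conforming reset callback (name hypothetical):

static void example_pmu_reset(void *info)
{
	struct arm_pmu *pmu = info; /* threaded through as the callback argument */

	/* ... write sane reset values to the PMU control registers ... */
}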
@@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
/*
* CPU PMU identification and probing.
*/
static struct arm_pmu *__devinit probe_current_pmu(void)
static int __devinit probe_current_pmu(struct arm_pmu *pmu)
{
struct arm_pmu *pmu = NULL;
int cpu = get_cpu();
unsigned long cpuid = read_cpuid_id();
unsigned long implementor = (cpuid & 0xFF000000) >> 24;
unsigned long part_number = (cpuid & 0xFFF0);
int ret = -ENODEV;
pr_info("probing PMU on CPU %d\n", cpu);
@@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
case 0xB360: /* ARM1136 */
case 0xB560: /* ARM1156 */
case 0xB760: /* ARM1176 */
pmu = armv6pmu_init();
ret = armv6pmu_init(pmu);
break;
case 0xB020: /* ARM11mpcore */
pmu = armv6mpcore_pmu_init();
ret = armv6mpcore_pmu_init(pmu);
break;
case 0xC080: /* Cortex-A8 */
pmu = armv7_a8_pmu_init();
ret = armv7_a8_pmu_init(pmu);
break;
case 0xC090: /* Cortex-A9 */
pmu = armv7_a9_pmu_init();
ret = armv7_a9_pmu_init(pmu);
break;
case 0xC050: /* Cortex-A5 */
pmu = armv7_a5_pmu_init();
ret = armv7_a5_pmu_init(pmu);
break;
case 0xC0F0: /* Cortex-A15 */
pmu = armv7_a15_pmu_init();
ret = armv7_a15_pmu_init(pmu);
break;
case 0xC070: /* Cortex-A7 */
pmu = armv7_a7_pmu_init();
ret = armv7_a7_pmu_init(pmu);
break;
}
/* Intel CPUs [xscale]. */
@@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
part_number = (cpuid >> 13) & 0x7;
switch (part_number) {
case 1:
pmu = xscale1pmu_init();
ret = xscale1pmu_init(pmu);
break;
case 2:
pmu = xscale2pmu_init();
ret = xscale2pmu_init(pmu);
break;
}
}
put_cpu();
return pmu;
return ret;
}
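A worked example of the CPUID decoding above, using a plausible Cortex-A15 MIDR value (the value itself is illustrative):

	cpuid       = 0x412fc0f1;                  /* read_cpuid_id() */
	implementor = (cpuid & 0xFF000000) >> 24;  /* 0x41, 'A' for ARM */
	part_number = (cpuid & 0xFFF0);            /* 0xC0F0 -> armv7_a15_pmu_init */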
static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct arm_pmu *(*init_fn)(void);
int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
int ret = -ENODEV;
if (cpu_pmu) {
pr_info("attempt to register multiple PMU devices!");
return -ENOSPC;
}
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
if (!pmu) {
pr_info("failed to allocate PMU device!");
return -ENOMEM;
}
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
init_fn = of_id->data;
cpu_pmu = init_fn();
ret = init_fn(pmu);
} else {
cpu_pmu = probe_current_pmu();
ret = probe_current_pmu(pmu);
}
if (!cpu_pmu)
return -ENODEV;
if (ret) {
pr_info("failed to register PMU devices!");
kfree(pmu);
return ret;
}
cpu_pmu = pmu;
cpu_pmu->plat_device = pdev;
cpu_pmu_init(cpu_pmu);
register_cpu_notifier(&cpu_pmu_hotplug_notifier);
armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
armpmu_register(cpu_pmu, PERF_TYPE_RAW);
return 0;
}
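The of_match_node() branch relies on a device-tree match table defined earlier in this file; its entries pair "compatible" strings with the new int-returning init functions, roughly as follows (entries assumed for illustration):

static struct of_device_id cpu_pmu_of_device_ids[] = {
	{ .compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init },
	{ .compatible = "arm,cortex-a9-pmu",  .data = armv7_a9_pmu_init },
	{},
};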
@@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = {
static int __init register_pmu_driver(void)
{
return platform_driver_register(&cpu_pmu_driver);
int err;
err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
if (err)
return err;
err = platform_driver_register(&cpu_pmu_driver);
if (err)
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
return err;
}
device_initcall(register_pmu_driver);
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
static inline u32 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
unsigned long value = 0;
if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
return value;
}
static inline void
armv6pmu_write_counter(int counter,
u32 value)
static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
static void armv6pmu_enable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct pmu_hw_events *cpuc;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
@@ -489,7 +493,6 @@
*/
armv6_pmcr_write(pmcr);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx);
armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
if (!armpmu_event_set_period(event, hwc, idx))
if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(hwc, idx);
cpu_pmu->disable(event);
}
/*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
return IRQ_HANDLED;
}
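The handler now recovers its PMU from the IRQ cookie rather than from a per-cpu global, which is what the (struct arm_pmu *)dev cast above is doing. A sketch of the pattern (handler name hypothetical):

static irqreturn_t example_handle_irq(int irq, void *dev)
{
	struct arm_pmu *pmu = dev; /* the dev_id passed to request_irq() */
	struct pmu_hw_events *cpuc = pmu->get_hw_events();

	/* ... check and handle overflowed counters via 'cpuc' ... */
	return IRQ_HANDLED;
}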
static void
armv6pmu_start(void)
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
armv6pmu_stop(void)
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Always place a cycle counter into the cycle counter. */
if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx)
static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, flags, evt = 0;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event)
&armv6_perf_cache_map, 0xFF);
}
static struct arm_pmu armv6pmu = {
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.map_event = armv6_map_event,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static struct arm_pmu *__devinit armv6pmu_init(void)
static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
{
return &armv6pmu;
cpu_pmu->name = "v6";
cpu_pmu->handle_irq = armv6pmu_handle_irq;
cpu_pmu->enable = armv6pmu_enable_event;
cpu_pmu->disable = armv6pmu_disable_event;
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
/*
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
&armv6mpcore_perf_cache_map, 0xFF);
}
static struct arm_pmu armv6mpcore_pmu = {
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.map_event = armv6mpcore_map_event,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
return &armv6mpcore_pmu;
cpu_pmu->name = "v6mpcore";
cpu_pmu->handle_irq = armv6pmu_handle_irq;
cpu_pmu->enable = armv6pmu_enable_event;
cpu_pmu->disable = armv6mpcore_pmu_disable_event;
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
#else
static struct arm_pmu *__devinit armv6pmu_init(void)
static int armv6pmu_init(struct arm_pmu *cpu_pmu)
{
return NULL;
return -ENODEV;
}
static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
return NULL;
return -ENODEV;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
[Two further file diffs in this commit are collapsed and not shown here.]
@@ -57,8 +57,6 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])
if (IS_ERR(omap_pmu_dev))
return PTR_ERR(omap_pmu_dev);
pm_runtime_enable(&omap_pmu_dev->dev);
return 0;
}
......