Commit f0d1bc47 authored by Will Deacon

ARM: pmu: remove unused reservation mechanism

The PMU reservation mechanism was originally intended to allow OProfile
and perf-events to co-ordinate over access to the CPU PMU. Since then,
OProfile for ARM has moved to using perf as its backend, so the
reservation code is no longer used.

This patch removes the reservation code for the CPU PMU on ARM.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 50243efd
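
The removed interface amounted to a single claim/release pair around the CPU PMU. A minimal sketch of how a client was expected to use it, based on the reserve_pmu()/release_pmu() declarations removed from asm/pmu.h below (the example_* functions are hypothetical and for illustration only):

#include <asm/pmu.h>

/* Illustrative only: claim the CPU PMU before programming its counters. */
static int example_start_counters(void)
{
	int err;

	/* Returns 0 on success or -EBUSY if another subsystem holds the PMU. */
	err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		return err;

	/* ... program and enable the counters here ... */

	return 0;
}

static void example_stop_counters(void)
{
	/* ... disable the counters ... */

	/* Allow another subsystem to use the PMU again. */
	release_pmu(ARM_PMU_DEVICE_CPU);
}

With OProfile now sitting on top of perf, perf-events is the only client left, so this claim/release dance serves no purpose.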
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -595,7 +595,6 @@ M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
 F:	arch/arm/kernel/perf_event*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/kernel/pmu.c
 F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1179,12 +1179,6 @@ config XSCALE_PMU
 	depends on CPU_XSCALE
 	default y
 
-config CPU_HAS_PMU
-	depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
-		   (!ARCH_OMAP3 || OMAP3_EMU)
-	default y
-	bool
-
 config MULTI_IRQ_HANDLER
 	bool
 	help
@@ -1757,7 +1751,7 @@ config HIGHPTE
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && CPU_HAS_PMU
+	depends on PERF_EVENTS
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -49,40 +49,6 @@ struct arm_pmu_platdata {
 	int (*runtime_suspend)(struct device *dev);
 };
 
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type)	{ }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
 #ifdef CONFIG_HW_PERF_EVENTS
 
 /* The events for a given PMU register set. */
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -69,7 +69,6 @@ obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
-obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -377,7 +377,6 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 	}
 
 	pm_runtime_put_sync(&pmu_device->dev);
-	release_pmu(armpmu->type);
 }
 
 static int
@@ -391,12 +390,6 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 	if (!pmu_device)
 		return -ENODEV;
 
-	err = reserve_pmu(armpmu->type);
-	if (err) {
-		pr_warning("unable to reserve pmu\n");
-		return err;
-	}
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
 		handle_irq = armpmu_platform_irq;
@@ -706,7 +699,6 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 		raw_spin_lock_init(&events->pmu_lock);
 	}
 	armpmu->get_hw_events = armpmu_get_cpu_events;
-	armpmu->type = ARM_PMU_DEVICE_CPU;
 }
 
 /*
--- a/arch/arm/kernel/pmu.c
+++ /dev/null
-/*
- * linux/arch/arm/kernel/pmu.c
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- * Copyright (C) 2010 ARM Ltd, Will Deacon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/pmu.h>
-
-/*
- * PMU locking to ensure mutual exclusion between different subsystems.
- */
-static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
-
-int
-reserve_pmu(enum arm_pmu_type type)
-{
-	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
-}
-EXPORT_SYMBOL_GPL(reserve_pmu);
-
-void
-release_pmu(enum arm_pmu_type type)
-{
-	clear_bit_unlock(type, pmu_lock);
-}
-EXPORT_SYMBOL_GPL(release_pmu);