Commit 3d33345a authored by Hendrik Brueckner, committed by Martin Schwidefsky

s390/cpum_cf: prepare for in-kernel counter measurements

Prepare the counter facility support to be used by other in-kernel
users.  As a first step, introduce the __kernel_cpumcf_begin() and
__kernel_cpumcf_end() functions to reserve the counter facility for
doing measurements and to release it again when the measurements
are done.
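
As an illustration (not part of this commit), a minimal sketch of how an
in-kernel user might bracket a measurement with the new pair of calls; the
function name do_cpumcf_measurement() and the elided measurement code are
hypothetical:

	#include <asm/cpu_mcf.h>

	/* Hypothetical in-kernel user: reserve the counter facility,
	 * perform the measurement, then release the facility again.
	 */
	static int do_cpumcf_measurement(void)
	{
		int rc;

		rc = __kernel_cpumcf_begin();	/* -EBUSY if already reserved */
		if (rc)
			return rc;

		/* ... activate counter sets and read counters here ... */

		__kernel_cpumcf_end();		/* release for other users */
		return 0;
	}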
Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 30e145f8
@@ -49,4 +49,7 @@ static inline void ctr_set_stop(u64 *state, int ctr_set)
 	*state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
 }
 
+int __kernel_cpumcf_begin(void);
+void __kernel_cpumcf_end(void);
+
 #endif /* _ASM_S390_CPU_MCF_H */
@@ -236,25 +236,45 @@ static void setup_pmc_cpu(void *flags)
 	lcctl(0);
 }
 
-/* Initialize the CPU-measurement facility */
-static int reserve_pmc_hardware(void)
+/* Reserve/release functions for sharing perf hardware */
+static DEFINE_SPINLOCK(cpumcf_owner_lock);
+static void *cpumcf_owner;
+
+/* Initialize the CPU-measurement counter facility */
+int __kernel_cpumcf_begin(void)
 {
 	int flags = PMC_INIT;
+	int err = 0;
+
+	spin_lock(&cpumcf_owner_lock);
+	if (cpumcf_owner)
+		err = -EBUSY;
+	else
+		cpumcf_owner = __builtin_return_address(0);
+	spin_unlock(&cpumcf_owner_lock);
+	if (err)
+		return err;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
 	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 
 	return 0;
 }
+EXPORT_SYMBOL(__kernel_cpumcf_begin);
 
-/* Release the CPU-measurement facility */
-static void release_pmc_hardware(void)
+/* Release the CPU-measurement counter facility */
+void __kernel_cpumcf_end(void)
 {
 	int flags = PMC_RELEASE;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
 	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+	spin_lock(&cpumcf_owner_lock);
+	cpumcf_owner = NULL;
+	spin_unlock(&cpumcf_owner_lock);
 }
+EXPORT_SYMBOL(__kernel_cpumcf_end);
 
 /* Release the PMU if event is the last perf event */
 static void hw_perf_event_destroy(struct perf_event *event)
@@ -262,7 +282,7 @@ static void hw_perf_event_destroy(struct perf_event *event)
 	if (!atomic_add_unless(&num_events, -1, 1)) {
 		mutex_lock(&pmc_reserve_mutex);
 		if (atomic_dec_return(&num_events) == 0)
-			release_pmc_hardware();
+			__kernel_cpumcf_end();
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 }
@@ -363,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	/* Initialize for using the CPU-measurement counter facility */
 	if (!atomic_inc_not_zero(&num_events)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
+		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
 			err = -EBUSY;
 		else
 			atomic_inc(&num_events);