Commit 7f5ac1a0 authored by Hendrik Brueckner, committed by Martin Schwidefsky

s390/cpum_cf: move common functions into a separate file

Move common functions of the counter facility support into a separate
file.
Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 869f4f98
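
For context, the new common layer exports a small interface that in-kernel users of the counter facility can call. Below is a minimal, illustrative sketch of how a hypothetical consumer might drive that interface; the example_cf_* functions are invented for illustration and are not part of this commit, only the kernel_cpumcf_*/__kernel_cpumcf_* helpers visible in the diff are real.

/* Illustrative sketch (not part of this commit): a hypothetical in-kernel
 * consumer of the counter facility, using only the helpers exported by the
 * new common layer.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <asm/cpu_mcf.h>

static int example_cf_start(void)
{
	int rc;

	/* The common layer must have initialized successfully at boot */
	if (!kernel_cpumcf_avail())
		return -ENODEV;

	/* Reserve the counter facility; fails with -EBUSY if already owned */
	rc = __kernel_cpumcf_begin();
	if (rc)
		return rc;

	return 0;
}

static void example_cf_stop(void)
{
	/* Read and clear any pending measurement alerts before releasing */
	unsigned long alert = kernel_cpumcf_alert(1);

	if (alert & CPU_MF_INT_CF_LCDA)
		pr_info("counter data was lost while counting\n");

	/* Release the counter facility for other users */
	__kernel_cpumcf_end();
}
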
arch/s390/kernel/Makefile
@@ -77,7 +77,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
arch/s390/kernel/perf_cpum_cf.c
@@ -14,28 +14,8 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mcf.h>
/* Per-CPU event structure for the counter facility */
DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
.ctr_set = {
[CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
},
.alert = ATOMIC64_INIT(0),
.state = 0,
.flags = 0,
.txn_flags = 0,
};
/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
static bool cpum_cf_initalized;
static enum cpumf_ctr_set get_counter_set(u64 event)
{
int set = CPUMF_CTR_SET_MAX;
@@ -180,122 +160,6 @@ static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
unsigned int alert, unsigned long unused)
{
struct cpu_cf_events *cpuhw;
if (!(alert & CPU_MF_INT_CF_MASK))
return;
inc_irq_stat(IRQEXT_CMC);
cpuhw = this_cpu_ptr(&cpu_cf_events);
/* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case. */
if (!(cpuhw->flags & PMU_F_RESERVED))
return;
/* counter authorization change alert */
if (alert & CPU_MF_INT_CF_CACA)
qctri(&cpuhw->info);
/* loss of counter data alert */
if (alert & CPU_MF_INT_CF_LCDA)
pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
/* loss of MT counter data alert */
if (alert & CPU_MF_INT_CF_MTDA)
pr_warn("CPU[%i] MT counter data was lost\n",
smp_processor_id());
/* store alert for special handling by in-kernel users */
atomic64_or(alert, &cpuhw->alert);
}
#define PMC_INIT 0
#define PMC_RELEASE 1
static void setup_pmc_cpu(void *flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
switch (*((int *) flags)) {
case PMC_INIT:
memset(&cpuhw->info, 0, sizeof(cpuhw->info));
qctri(&cpuhw->info);
cpuhw->flags |= PMU_F_RESERVED;
break;
case PMC_RELEASE:
cpuhw->flags &= ~PMU_F_RESERVED;
break;
}
/* Disable CPU counter sets */
lcctl(0);
}
bool kernel_cpumcf_avail(void)
{
return cpum_cf_initalized;
}
EXPORT_SYMBOL(kernel_cpumcf_avail);
/* Reserve/release functions for sharing perf hardware */
static DEFINE_SPINLOCK(cpumcf_owner_lock);
static void *cpumcf_owner;
/* Initialize the CPU-measurement counter facility */
int __kernel_cpumcf_begin(void)
{
int flags = PMC_INIT;
int err = 0;
spin_lock(&cpumcf_owner_lock);
if (cpumcf_owner)
err = -EBUSY;
else
cpumcf_owner = __builtin_return_address(0);
spin_unlock(&cpumcf_owner_lock);
if (err)
return err;
on_each_cpu(setup_pmc_cpu, &flags, 1);
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
EXPORT_SYMBOL(__kernel_cpumcf_begin);
/* Obtain the CPU-measurement alerts for the counter facility */
unsigned long kernel_cpumcf_alert(int clear)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
unsigned long alert;
alert = atomic64_read(&cpuhw->alert);
if (clear)
atomic64_set(&cpuhw->alert, 0);
return alert;
}
EXPORT_SYMBOL(kernel_cpumcf_alert);
/* Release the CPU-measurement counter facility */
void __kernel_cpumcf_end(void)
{
int flags = PMC_RELEASE;
on_each_cpu(setup_pmc_cpu, &flags, 1);
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
spin_lock(&cpumcf_owner_lock);
cpumcf_owner = NULL;
spin_unlock(&cpumcf_owner_lock);
}
EXPORT_SYMBOL(__kernel_cpumcf_end);
/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
@@ -672,59 +536,17 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
return 0;
}
static int s390_pmu_online_cpu(unsigned int cpu)
{
return cpumf_pmf_setup(cpu, PMC_INIT);
}
static int s390_pmu_offline_cpu(unsigned int cpu)
{
return cpumf_pmf_setup(cpu, PMC_RELEASE);
}
static int __init cpumf_pmu_init(void)
{
int rc;
if (!cpum_cf_avail())
if (!kernel_cpumcf_avail())
return -ENODEV;
/* clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters */
ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (rc) {
pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc);
return rc;
}
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
return rc;
}
rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
"perf/s390/cf:online",
s390_pmu_online_cpu, s390_pmu_offline_cpu);
if (!rc)
cpum_cf_initalized = true;
return rc;
}
early_initcall(cpumf_pmu_init);
subsys_initcall(cpumf_pmu_init);
arch/s390/kernel/perf_cpum_cf_common.c (new file)
// SPDX-License-Identifier: GPL-2.0
/*
* CPU-Measurement Counter Facility Support - Common Layer
*
* Copyright IBM Corp. 2019
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
*/
#define KMSG_COMPONENT "cpum_cf_common"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mcf.h>
/* Per-CPU event structure for the counter facility */
DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
.ctr_set = {
[CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
[CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
},
.alert = ATOMIC64_INIT(0),
.state = 0,
.flags = 0,
.txn_flags = 0,
};
/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
static bool cpum_cf_initalized;
/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
unsigned int alert, unsigned long unused)
{
struct cpu_cf_events *cpuhw;
if (!(alert & CPU_MF_INT_CF_MASK))
return;
inc_irq_stat(IRQEXT_CMC);
cpuhw = this_cpu_ptr(&cpu_cf_events);
/* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case. */
if (!(cpuhw->flags & PMU_F_RESERVED))
return;
/* counter authorization change alert */
if (alert & CPU_MF_INT_CF_CACA)
qctri(&cpuhw->info);
/* loss of counter data alert */
if (alert & CPU_MF_INT_CF_LCDA)
pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
/* loss of MT counter data alert */
if (alert & CPU_MF_INT_CF_MTDA)
pr_warn("CPU[%i] MT counter data was lost\n",
smp_processor_id());
/* store alert for special handling by in-kernel users */
atomic64_or(alert, &cpuhw->alert);
}
#define PMC_INIT 0
#define PMC_RELEASE 1
static void cpum_cf_setup_cpu(void *flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
switch (*((int *) flags)) {
case PMC_INIT:
memset(&cpuhw->info, 0, sizeof(cpuhw->info));
qctri(&cpuhw->info);
cpuhw->flags |= PMU_F_RESERVED;
break;
case PMC_RELEASE:
cpuhw->flags &= ~PMU_F_RESERVED;
break;
}
/* Disable CPU counter sets */
lcctl(0);
}
bool kernel_cpumcf_avail(void)
{
return cpum_cf_initalized;
}
EXPORT_SYMBOL(kernel_cpumcf_avail);
/* Reserve/release functions for sharing perf hardware */
static DEFINE_SPINLOCK(cpumcf_owner_lock);
static void *cpumcf_owner;
/* Initialize the CPU-measurement counter facility */
int __kernel_cpumcf_begin(void)
{
int flags = PMC_INIT;
int err = 0;
spin_lock(&cpumcf_owner_lock);
if (cpumcf_owner)
err = -EBUSY;
else
cpumcf_owner = __builtin_return_address(0);
spin_unlock(&cpumcf_owner_lock);
if (err)
return err;
on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
EXPORT_SYMBOL(__kernel_cpumcf_begin);
/* Obtain the CPU-measurement alerts for the counter facility */
unsigned long kernel_cpumcf_alert(int clear)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
unsigned long alert;
alert = atomic64_read(&cpuhw->alert);
if (clear)
atomic64_set(&cpuhw->alert, 0);
return alert;
}
EXPORT_SYMBOL(kernel_cpumcf_alert);
/* Release the CPU-measurement counter facility */
void __kernel_cpumcf_end(void)
{
int flags = PMC_RELEASE;
on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
spin_lock(&cpumcf_owner_lock);
cpumcf_owner = NULL;
spin_unlock(&cpumcf_owner_lock);
}
EXPORT_SYMBOL(__kernel_cpumcf_end);
static int cpum_cf_setup(unsigned int cpu, int flags)
{
local_irq_disable();
cpum_cf_setup_cpu(&flags);
local_irq_enable();
return 0;
}
static int cpum_cf_online_cpu(unsigned int cpu)
{
return cpum_cf_setup(cpu, PMC_INIT);
}
static int cpum_cf_offline_cpu(unsigned int cpu)
{
return cpum_cf_setup(cpu, PMC_RELEASE);
}
static int __init cpum_cf_init(void)
{
int rc;
if (!cpum_cf_avail())
return -ENODEV;
/* clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters */
ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (rc) {
pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc);
return rc;
}
rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
"perf/s390/cf:online",
cpum_cf_online_cpu, cpum_cf_offline_cpu);
if (!rc)
cpum_cf_initalized = true;
return rc;
}
early_initcall(cpum_cf_init);