Commit 95ca792c authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86: Convert the core to the hotplug state machine

Replace the perf_cpu_notifier() install mechanism, which magically invokes
the callback on the current CPU, with explicit CPU hotplug state machine
callbacks. Convert the hardware specific callbacks which are invoked from
the x86 perf core to return proper error codes instead of pointless
NOTIFY_BAD return values.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153333.670720553@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 00e16c3d
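For readers unfamiliar with the conversion, here is a minimal sketch (not part of the patch; foo_alloc_for() and the foo_* callback names are hypothetical) of how error reporting changes when a CPU notifier is replaced by a hotplug state callback: the notifier had to encode failure as NOTIFY_BAD, while a state callback simply returns 0 or a negative errno that the hotplug core can propagate.

static bool foo_alloc_for(unsigned int cpu);      /* hypothetical per-CPU allocation */

/* Old style: one notifier multiplexing all hotplug events. */
static int foo_cpu_notifier(struct notifier_block *self,
                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                if (!foo_alloc_for(cpu))
                        return NOTIFY_BAD;      /* failure hidden in a notifier code */
                break;
        }
        return NOTIFY_OK;
}

/* New style: one callback per hotplug state, returning a real error code. */
static int foo_prepare_cpu(unsigned int cpu)
{
        if (!foo_alloc_for(cpu))
                return -ENOMEM;                 /* hotplug core aborts the bringup */
        return 0;
}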
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
         WARN_ON_ONCE(cpuc->amd_nb);
 
         if (!x86_pmu.amd_nb_constraints)
-                return NOTIFY_OK;
+                return 0;
 
         cpuc->amd_nb = amd_alloc_nb(cpu);
         if (!cpuc->amd_nb)
-                return NOTIFY_BAD;
+                return -ENOMEM;
 
-        return NOTIFY_OK;
+        return 0;
 }
 
 static void amd_pmu_cpu_starting(int cpu)
@@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler);
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
 {
-        unsigned int cpu = (long)hcpu;
         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-        int i, ret = NOTIFY_OK;
-
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_UP_PREPARE:
-                for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
-                        cpuc->kfree_on_online[i] = NULL;
-                if (x86_pmu.cpu_prepare)
-                        ret = x86_pmu.cpu_prepare(cpu);
-                break;
-
-        case CPU_STARTING:
-                if (x86_pmu.cpu_starting)
-                        x86_pmu.cpu_starting(cpu);
-                break;
+        int i;
 
-        case CPU_ONLINE:
-                for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
-                        kfree(cpuc->kfree_on_online[i]);
-                        cpuc->kfree_on_online[i] = NULL;
-                }
-                break;
+        for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+                cpuc->kfree_on_online[i] = NULL;
+        if (x86_pmu.cpu_prepare)
+                return x86_pmu.cpu_prepare(cpu);
+        return 0;
+}
 
-        case CPU_DYING:
-                if (x86_pmu.cpu_dying)
-                        x86_pmu.cpu_dying(cpu);
-                break;
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+        if (x86_pmu.cpu_dead)
+                x86_pmu.cpu_dead(cpu);
+        return 0;
+}
 
-        case CPU_UP_CANCELED:
-        case CPU_DEAD:
-                if (x86_pmu.cpu_dead)
-                        x86_pmu.cpu_dead(cpu);
-                break;
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+        int i;
 
-        default:
-                break;
+        for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+                kfree(cpuc->kfree_on_online[i]);
+                cpuc->kfree_on_online[i] = NULL;
         }
+        return 0;
+}
 
-        return ret;
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+        if (x86_pmu.cpu_starting)
+                x86_pmu.cpu_starting(cpu);
+        return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+        if (x86_pmu.cpu_dying)
+                x86_pmu.cpu_dying(cpu);
+        return 0;
 }
 
 static void __init pmu_check_apic(void)
@@ -1764,10 +1764,39 @@ static int __init init_hw_perf_events(void)
         pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
         pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
-        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-        perf_cpu_notifier(x86_pmu_notifier);
+        /*
+         * Install callbacks. Core will call them for each online
+         * cpu.
+         */
+        err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+                                x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+        if (err)
+                return err;
+
+        err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+                                "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+                                x86_pmu_dying_cpu);
+        if (err)
+                goto out;
+
+        err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+                                x86_pmu_online_cpu, NULL);
+        if (err)
+                goto out1;
+
+        err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+        if (err)
+                goto out2;
 
         return 0;
+
+out2:
+        cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+        cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+        cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+        return err;
 }
 early_initcall(init_hw_perf_events);
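The unwinding above (out2/out1/out) is the usual pattern when several hotplug states are installed in sequence: each state that was set up successfully is removed again if a later step fails. A rough sketch of the same pattern for a hypothetical driver follows; the FOO state names and foo_* callbacks are illustrative only. Note that cpuhp_setup_state() also invokes the startup callback for every CPU that is already online.

static int __init foo_driver_init(void)
{
        int ret;

        /* PREPARE-stage pair: prepare on bringup, dead on teardown. */
        ret = cpuhp_setup_state(CPUHP_FOO_PREPARE, "FOO_PREPARE",
                                foo_prepare_cpu, foo_dead_cpu);
        if (ret)
                return ret;

        /* ONLINE-stage callback; NULL teardown means nothing to undo. */
        ret = cpuhp_setup_state(CPUHP_AP_FOO_ONLINE, "AP_FOO_ONLINE",
                                foo_online_cpu, NULL);
        if (ret)
                cpuhp_remove_state(CPUHP_FOO_PREPARE);  /* roll back step one */

        return ret;
}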
@@ -3050,7 +3050,7 @@ static int intel_pmu_cpu_prepare(int cpu)
                 cpuc->excl_thread_id = 0;
         }
 
-        return NOTIFY_OK;
+        return 0;
 
 err_constraint_list:
         kfree(cpuc->constraint_list);
@@ -3061,7 +3061,7 @@ static int intel_pmu_cpu_prepare(int cpu)
         cpuc->shared_regs = NULL;
 
 err:
-        return NOTIFY_BAD;
+        return -ENOMEM;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
@@ -5,6 +5,7 @@ enum cpuhp_state {
         CPUHP_OFFLINE,
         CPUHP_CREATE_THREADS,
         CPUHP_PERF_PREPARE,
+        CPUHP_PERF_X86_PREPARE,
         CPUHP_NOTIFY_PREPARE,
         CPUHP_BRINGUP_CPU,
         CPUHP_AP_IDLE_DEAD,
@@ -17,6 +18,7 @@ enum cpuhp_state {
         CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
         CPUHP_AP_IRQ_BCM2836_STARTING,
         CPUHP_AP_ARM_MVEBU_COHERENCY,
+        CPUHP_AP_PERF_X86_STARTING,
         CPUHP_AP_NOTIFY_STARTING,
         CPUHP_AP_ONLINE,
         CPUHP_TEARDOWN_CPU,
@@ -24,6 +26,7 @@ enum cpuhp_state {
         CPUHP_AP_SMPBOOT_THREADS,
         CPUHP_AP_X86_VDSO_VMA_ONLINE,
         CPUHP_AP_PERF_ONLINE,
+        CPUHP_AP_PERF_X86_ONLINE,
         CPUHP_AP_NOTIFY_ONLINE,
         CPUHP_AP_ONLINE_DYN,
         CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
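A note on placement: hotplug states are brought up in the order of this enum and torn down in reverse, so the three new x86 entries sit at fixed positions relative to the generic perf states. A subsystem that does not depend on such ordering can instead request a dynamically allocated slot; a small sketch under that assumption (the foo_* names are hypothetical):

static enum cpuhp_state foo_online_state;

static int __init foo_init(void)
{
        int ret;

        /* The core picks a free slot in the CPUHP_AP_ONLINE_DYN range. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                foo_online_cpu, foo_offline_cpu);
        if (ret < 0)
                return ret;

        foo_online_state = ret;         /* keep it for cpuhp_remove_state() */
        return 0;
}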