Commit 503d3291 authored by Zhouyi Zhou, committed by Ingo Molnar

perf/x86/amd: Try to fix some mem allocation failure handling

According to Peter's advice, put the failure handling into a goto chain.

Compile-tested on x86_64; please check whether there is anything I missed.
Signed-off-by: Zhouyi Zhou <yizhouzhou@ict.ac.cn>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1402459743-20513-1-git-send-email-zhouzhouyi@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8b5b584d
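The "goto chain" the message refers to is the usual kernel error-unwind idiom: acquire resources in order and, on failure, jump to a label that releases only what was already acquired, in reverse order. A minimal, self-contained sketch of the pattern (illustrative names, not code from this patch):

#include <stdlib.h>
#include <errno.h>

struct ctx {
	void *a;
	void *b;
};

/* Acquire two resources; on failure, unwind through a chain of labels
 * so that each label releases exactly what was acquired before the
 * failing step. */
static int ctx_init(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto fail_a;		/* nothing to undo yet */

	c->b = malloc(64);
	if (!c->b)
		goto fail_b;		/* must undo the first allocation */

	return 0;

fail_b:
	free(c->a);
	c->a = NULL;
fail_a:
	return -ENOMEM;
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_init(&c))
		return 1;
	free(c.b);
	free(c.a);
	return 0;
}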
@@ -294,31 +294,41 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 			cpu_to_node(cpu));
 }
 
-static void amd_uncore_cpu_up_prepare(unsigned int cpu)
+static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
-	struct amd_uncore *uncore;
+	struct amd_uncore *uncore_nb = NULL, *uncore_l2;
 
 	if (amd_uncore_nb) {
-		uncore = amd_uncore_alloc(cpu);
-		uncore->cpu = cpu;
-		uncore->num_counters = NUM_COUNTERS_NB;
-		uncore->rdpmc_base = RDPMC_BASE_NB;
-		uncore->msr_base = MSR_F15H_NB_PERF_CTL;
-		uncore->active_mask = &amd_nb_active_mask;
-		uncore->pmu = &amd_nb_pmu;
-		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
+		uncore_nb = amd_uncore_alloc(cpu);
+		if (!uncore_nb)
+			goto fail;
+		uncore_nb->cpu = cpu;
+		uncore_nb->num_counters = NUM_COUNTERS_NB;
+		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
+		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
+		uncore_nb->active_mask = &amd_nb_active_mask;
+		uncore_nb->pmu = &amd_nb_pmu;
+		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
 	}
 
 	if (amd_uncore_l2) {
-		uncore = amd_uncore_alloc(cpu);
-		uncore->cpu = cpu;
-		uncore->num_counters = NUM_COUNTERS_L2;
-		uncore->rdpmc_base = RDPMC_BASE_L2;
-		uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
-		uncore->active_mask = &amd_l2_active_mask;
-		uncore->pmu = &amd_l2_pmu;
-		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+		uncore_l2 = amd_uncore_alloc(cpu);
+		if (!uncore_l2)
+			goto fail;
+		uncore_l2->cpu = cpu;
+		uncore_l2->num_counters = NUM_COUNTERS_L2;
+		uncore_l2->rdpmc_base = RDPMC_BASE_L2;
+		uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
+		uncore_l2->active_mask = &amd_l2_active_mask;
+		uncore_l2->pmu = &amd_l2_pmu;
+		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
 	}
+
+	return 0;
+
+fail:
+	kfree(uncore_nb);
+	return -ENOMEM;
 }
 
 static struct amd_uncore *
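The new NULL checks rely on amd_uncore_alloc() returning NULL on allocation failure. That helper sits outside this hunk; judging from the context lines it is essentially a node-local zeroing allocation, roughly the following (reconstructed for context, not part of this patch):

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}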
@@ -441,7 +451,7 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 
 	if (!--uncore->refcnt)
 		kfree(uncore);
-	*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
+	*per_cpu_ptr(uncores, cpu) = NULL;
 }
 
 static void amd_uncore_cpu_dead(unsigned int cpu)
@@ -461,7 +471,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		amd_uncore_cpu_up_prepare(cpu);
+		if (amd_uncore_cpu_up_prepare(cpu))
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 	case CPU_STARTING:
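notifier_from_errno() packs an errno into a notifier return value carrying NOTIFY_STOP_MASK, so the CPU-hotplug core treats the CPU_UP_PREPARE step as failed and does not bring the CPU online; the core can recover the errno with notifier_to_errno(). A simplified sketch of the callback shape this hunk adopts (assumed and trimmed to the relevant case, not the full notifier):

#include <linux/notifier.h>
#include <linux/cpu.h>

/* Sketch only: propagate a failure from the prepare step to the
 * hotplug core instead of silently ignoring it. */
static int example_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		ret = amd_uncore_cpu_up_prepare(cpu);	/* 0 or -ENOMEM */
		break;
	default:
		break;
	}

	return notifier_from_errno(ret);
}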
@@ -501,20 +512,33 @@ static void __init init_cpu_already_online(void *dummy)
 	amd_uncore_cpu_online(cpu);
 }
 
+static void cleanup_cpu_online(void *dummy)
+{
+	unsigned int cpu = smp_processor_id();
+
+	amd_uncore_cpu_dead(cpu);
+}
+
 static int __init amd_uncore_init(void)
 {
-	unsigned int cpu;
+	unsigned int cpu, cpu2;
 	int ret = -ENODEV;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-		return -ENODEV;
+		goto fail_nodev;
 
 	if (!cpu_has_topoext)
-		return -ENODEV;
+		goto fail_nodev;
 
 	if (cpu_has_perfctr_nb) {
 		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
-		perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+		if (!amd_uncore_nb) {
+			ret = -ENOMEM;
+			goto fail_nb;
+		}
+		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+		if (ret)
+			goto fail_nb;
 
 		printk(KERN_INFO "perf: AMD NB counters detected\n");
 		ret = 0;
@@ -522,20 +546,28 @@ static int __init amd_uncore_init(void)
 
 	if (cpu_has_perfctr_l2) {
 		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
-		perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+		if (!amd_uncore_l2) {
+			ret = -ENOMEM;
+			goto fail_l2;
+		}
+		ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+		if (ret)
+			goto fail_l2;
 
 		printk(KERN_INFO "perf: AMD L2I counters detected\n");
 		ret = 0;
 	}
 
 	if (ret)
-		return -ENODEV;
+		goto fail_nodev;
 
 	cpu_notifier_register_begin();
 
 	/* init cpus already online before registering for hotplug notifier */
 	for_each_online_cpu(cpu) {
-		amd_uncore_cpu_up_prepare(cpu);
+		ret = amd_uncore_cpu_up_prepare(cpu);
+		if (ret)
+			goto fail_online;
 		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
 	}
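For context on what is allocated and freed along these paths: amd_uncore_nb and amd_uncore_l2 are per-CPU arrays of pointers (struct amd_uncore * __percpu *). alloc_percpu() provides one pointer slot per possible CPU, and amd_uncore_cpu_up_prepare() later fills each slot with a kzalloc'd object, which is why the error paths must pair free_percpu() for the slots with kfree() for the objects. A hedged sketch of that layout with made-up names:

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

struct demo_uncore {
	unsigned int cpu;
};

static struct demo_uncore * __percpu *demo_uncores;

/* One pointer slot per possible CPU. */
static int demo_init(void)
{
	demo_uncores = alloc_percpu(struct demo_uncore *);
	return demo_uncores ? 0 : -ENOMEM;
}

/* Fill this CPU's slot with a node-local object. */
static int demo_prepare_cpu(unsigned int cpu)
{
	struct demo_uncore *u = kzalloc_node(sizeof(*u), GFP_KERNEL,
					     cpu_to_node(cpu));
	if (!u)
		return -ENOMEM;

	u->cpu = cpu;
	*per_cpu_ptr(demo_uncores, cpu) = u;
	return 0;
}

/* Tear down in the opposite order: the objects first, then the slots. */
static void demo_exit(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		kfree(*per_cpu_ptr(demo_uncores, cpu));
	free_percpu(demo_uncores);
}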
@@ -543,5 +575,30 @@ static int __init amd_uncore_init(void)
 	cpu_notifier_register_done();
 
 	return 0;
+
+fail_online:
+	for_each_online_cpu(cpu2) {
+		if (cpu2 == cpu)
+			break;
+		smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
+	}
+	cpu_notifier_register_done();
+
+	/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
+	amd_uncore_nb = amd_uncore_l2 = NULL;
+	if (cpu_has_perfctr_l2)
+		perf_pmu_unregister(&amd_l2_pmu);
+fail_l2:
+	if (cpu_has_perfctr_nb)
+		perf_pmu_unregister(&amd_nb_pmu);
+	if (amd_uncore_l2)
+		free_percpu(amd_uncore_l2);
+fail_nb:
+	if (amd_uncore_nb)
+		free_percpu(amd_uncore_nb);
+fail_nodev:
+	return ret;
 }
 
 device_initcall(amd_uncore_init);