Commit 0dc1a185 authored by Mark Rutland's avatar Mark Rutland Committed by Will Deacon

arm_pmu: add armpmu_alloc_atomic()

In ACPI systems, we don't know the makeup of CPUs until we hotplug them
on, and thus have to allocate the PMU data structures at hotplug time.
Thus, we must use GFP_ATOMIC allocations.

Let's add an armpmu_alloc_atomic() that we can use in this case.
Signed-off-by: default avatarMark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: default avatarWill Deacon <will.deacon@arm.com>
parent d3d5aac2
...@@ -760,18 +760,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) ...@@ -760,18 +760,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
&cpu_pmu->node); &cpu_pmu->node);
} }
struct arm_pmu *armpmu_alloc(void) static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{ {
struct arm_pmu *pmu; struct arm_pmu *pmu;
int cpu; int cpu;
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); pmu = kzalloc(sizeof(*pmu), flags);
if (!pmu) { if (!pmu) {
pr_info("failed to allocate PMU device!\n"); pr_info("failed to allocate PMU device!\n");
goto out; goto out;
} }
pmu->hw_events = alloc_percpu(struct pmu_hw_events); pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) { if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n"); pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu; goto out_free_pmu;
...@@ -817,6 +817,17 @@ struct arm_pmu *armpmu_alloc(void) ...@@ -817,6 +817,17 @@ struct arm_pmu *armpmu_alloc(void)
return NULL; return NULL;
} }
/*
 * armpmu_alloc() - allocate and initialise a struct arm_pmu.
 *
 * Thin wrapper around __armpmu_alloc() using GFP_KERNEL, for callers
 * running in process context where sleeping allocations are permitted.
 *
 * Returns the new arm_pmu on success, or NULL on allocation failure.
 */
struct arm_pmu *armpmu_alloc(void)
{
return __armpmu_alloc(GFP_KERNEL);
}
/*
 * armpmu_alloc_atomic() - allocate and initialise a struct arm_pmu
 * without sleeping.
 *
 * Thin wrapper around __armpmu_alloc() using GFP_ATOMIC, for callers
 * in atomic context (e.g. the ACPI CPU hotplug path, where PMU data
 * structures must be allocated at hotplug time — see commit message).
 *
 * Returns the new arm_pmu on success, or NULL on allocation failure.
 */
struct arm_pmu *armpmu_alloc_atomic(void)
{
return __armpmu_alloc(GFP_ATOMIC);
}
void armpmu_free(struct arm_pmu *pmu) void armpmu_free(struct arm_pmu *pmu)
{ {
free_percpu(pmu->hw_events); free_percpu(pmu->hw_events);
......
...@@ -127,7 +127,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) ...@@ -127,7 +127,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu; return pmu;
} }
pmu = armpmu_alloc(); pmu = armpmu_alloc_atomic();
if (!pmu) { if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n", pr_warn("Unable to allocate PMU for CPU%d\n",
smp_processor_id()); smp_processor_id());
......
...@@ -157,6 +157,7 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } ...@@ -157,6 +157,7 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
/* Internal functions only for core arm_pmu code */ /* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void); struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu); void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu); int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu); int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment