Commit 5a09928d authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Remove task_ctx_size

A new kmem_cache mechanism has replaced kzalloc() for allocating the PMU-specific
data. The task_ctx_size field is therefore no longer required.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-19-git-send-email-kan.liang@linux.intel.com
parent 33cad284
@@ -2371,7 +2371,6 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
-	.task_ctx_size		= sizeof(struct x86_perf_task_context),
 	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,
...
@@ -1672,7 +1672,6 @@ void __init intel_pmu_arch_lbr_init(void)
 	size = sizeof(struct x86_perf_task_context_arch_lbr) +
 	       lbr_nr * sizeof(struct lbr_entry);
-	x86_get_pmu()->task_ctx_size = size;
 	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
...
@@ -419,10 +419,6 @@ struct pmu {
 	 */
 	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
-	/*
-	 * PMU specific data size
-	 */
-	size_t				task_ctx_size;
 	/*
 	 * Kmem cache of PMU specific data
...
@@ -1243,15 +1243,13 @@ static void *alloc_task_ctx_data(struct pmu *pmu)
 	if (pmu->task_ctx_cache)
 		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);

-	return kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+	return NULL;
 }

 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
 {
 	if (pmu->task_ctx_cache && task_ctx_data)
 		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-	else
-		kfree(task_ctx_data);
 }

 static void free_ctx(struct rcu_head *head)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment