Commit aab5b71e authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Rename the perf_event_aux*() APIs to perf_event_sb*(), to separate them from AUX ring-buffer records

There are now two different things called AUX in perf: the
infrastructure that delivers the mmap/comm/task records, and the
AUX area in the mmap buffer (with its associated PERF_RECORD_AUX records).

Since the former is internal, rename it to side-band to reduce
the confusion factor.

No change in functionality.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f2fb6bef
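
To make the distinction above concrete, here is a minimal userspace sketch (an editor's illustration, not part of this patch): it opens a dummy software event that asks only for the side-band mmap/comm/task records. The other "AUX", the PMU-specific AUX area used by e.g. Intel PT, would be a second mmap() at the aux_offset/aux_size advertised in struct perf_event_mmap_page and is deliberately not set up here.

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long page = sysconf(_SC_PAGESIZE);

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* no samples, side-band records only */
	attr.mmap   = 1;			/* PERF_RECORD_MMAP */
	attr.comm   = 1;			/* PERF_RECORD_COMM */
	attr.task   = 1;			/* PERF_RECORD_FORK / PERF_RECORD_EXIT */

	/* Monitor the calling task on any CPU. */
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * The side-band records requested above show up in the data pages of
	 * this mapping (1 header page + 2^n data pages); a consumer would
	 * follow data_head/data_tail in the struct perf_event_mmap_page
	 * header page.  The AUX area, by contrast, would be a second mmap()
	 * done after writing aux_offset/aux_size into that header page.
	 */
	void *rb = mmap(NULL, (1 + 8) * page, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (rb == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	munmap(rb, (1 + 8) * page);
	close(fd);
	return 0;
}

In-kernel, the delivery path for those records is exactly what this patch renames: perf_event_aux() becomes perf_iterate_sb(), which walks the side-band event lists.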
@@ -5879,11 +5879,11 @@ perf_event_read_event(struct perf_event *event,
 	perf_output_end(&handle);
 }
 
-typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+typedef void (perf_iterate_f)(struct perf_event *event, void *data);
 
 static void
-perf_event_aux_ctx(struct perf_event_context *ctx,
-		   perf_event_aux_output_cb output,
+perf_iterate_ctx(struct perf_event_context *ctx,
+		   perf_iterate_f output,
 		   void *data, bool all)
 {
 	struct perf_event *event;
@@ -5900,18 +5900,7 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
 	}
 }
 
-static void
-perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
-			struct perf_event_context *task_ctx)
-{
-	rcu_read_lock();
-	preempt_disable();
-	perf_event_aux_ctx(task_ctx, output, data, false);
-	preempt_enable();
-	rcu_read_unlock();
-}
-
-static void perf_event_sb_iterate(perf_event_aux_output_cb output, void *data)
+static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 {
 	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
 	struct perf_event *event;
@@ -5925,33 +5914,40 @@ static void perf_event_sb_iterate(perf_event_aux_output_cb output, void *data)
 	}
 }
 
+/*
+ * Iterate all events that need to receive side-band events.
+ *
+ * For new callers; ensure that account_pmu_sb_event() includes
+ * your event, otherwise it might not get delivered.
+ */
 static void
-perf_event_aux(perf_event_aux_output_cb output, void *data,
+perf_iterate_sb(perf_iterate_f output, void *data,
 	       struct perf_event_context *task_ctx)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	rcu_read_lock();
+	preempt_disable();
+
 	/*
-	 * If we have task_ctx != NULL we only notify
-	 * the task context itself. The task_ctx is set
-	 * only for EXIT events before releasing task
+	 * If we have task_ctx != NULL we only notify the task context itself.
+	 * The task_ctx is set only for EXIT events before releasing task
 	 * context.
 	 */
 	if (task_ctx) {
-		perf_event_aux_task_ctx(output, data, task_ctx);
-		return;
+		perf_iterate_ctx(task_ctx, output, data, false);
+		goto done;
 	}
 
-	rcu_read_lock();
-	preempt_disable();
-	perf_event_sb_iterate(output, data);
+	perf_iterate_sb_cpu(output, data);
+
 	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		if (ctx)
-			perf_event_aux_ctx(ctx, output, data, false);
+			perf_iterate_ctx(ctx, output, data, false);
 	}
+done:
 	preempt_enable();
 	rcu_read_unlock();
 }
@@ -6001,7 +5997,7 @@ void perf_event_exec(void)
 		perf_event_enable_on_exec(ctxn);
 
-		perf_event_aux_ctx(ctx, perf_event_addr_filters_exec, NULL,
+		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
 				   true);
 	}
 	rcu_read_unlock();
@@ -6045,9 +6041,9 @@ static int __perf_pmu_output_stop(void *info)
 	};
 
 	rcu_read_lock();
-	perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
+	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
 	if (cpuctx->task_ctx)
-		perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
 				   &ro, false);
 	rcu_read_unlock();
@@ -6176,7 +6172,7 @@ static void perf_event_task(struct task_struct *task,
 		},
 	};
 
-	perf_event_aux(perf_event_task_output,
+	perf_iterate_sb(perf_event_task_output,
 		       &task_event,
 		       task_ctx);
 }
@@ -6255,7 +6251,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
-	perf_event_aux(perf_event_comm_output,
+	perf_iterate_sb(perf_event_comm_output,
 		       comm_event,
 		       NULL);
 }
@@ -6486,7 +6482,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-	perf_event_aux(perf_event_mmap_output,
+	perf_iterate_sb(perf_event_mmap_output,
 		       mmap_event,
 		       NULL);
@@ -6569,7 +6565,7 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 		if (!ctx)
 			continue;
 
-		perf_event_aux_ctx(ctx, __perf_addr_filters_adjust, vma, true);
+		perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
	}
 	rcu_read_unlock();
 }
@@ -6756,7 +6752,7 @@ static void perf_event_switch(struct task_struct *task,
 		},
 	};
 
-	perf_event_aux(perf_event_switch_output,
+	perf_iterate_sb(perf_event_switch_output,
 		       &switch_event,
 		       NULL);
 }
@@ -8654,6 +8650,13 @@ static void attach_sb_event(struct perf_event *event)
 	raw_spin_unlock(&pel->lock);
 }
 
+/*
+ * We keep a list of all !task (and therefore per-cpu) events
+ * that need to receive side-band records.
+ *
+ * This avoids having to scan all the various PMU per-cpu contexts
+ * looking for them.
+ */
 static void account_pmu_sb_event(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
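
The comment added above account_pmu_sb_event() refers to the per-CPU pmu_sb_events lists introduced by the parent commit f2fb6bef. Below is a simplified sketch of that pattern, reconstructed from the hunks above; field and helper details may differ from the real kernel/events code. Events that always want side-band records are linked onto a per-CPU list under a raw spinlock, and perf_iterate_sb_cpu() walks that list under RCU, so perf_iterate_sb() never has to scan every PMU's per-CPU context.

/* Simplified sketch, not the verbatim kernel code. */
struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

/* Called when a side-band-consuming !task event is created
 * (see account_pmu_sb_event() above). */
static void attach_sb_event(struct perf_event *event)
{
	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);

	raw_spin_lock(&pel->lock);
	/* sb_list: per-event link field added by the parent commit. */
	list_add_rcu(&event->sb_list, &pel->list);
	raw_spin_unlock(&pel->lock);
}

/* Deliver one side-band record to every listed event on this CPU. */
static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
{
	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
	struct perf_event *event;

	list_for_each_entry_rcu(event, &pel->list, sb_list) {
		if (event->state < PERF_EVENT_STATE_INACTIVE)
			continue;
		output(event, data);
	}
}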