Commit 487f05e1 authored by Alexander Shishkin, committed by Ingo Molnar

perf/core: Optimize event rescheduling on active contexts

When new events are added to an active context, we reschedule all
cpu groups and all task groups in order to preserve the priority
order (cpu pinned, task pinned, cpu flexible, task flexible). In
reality, we only need to reschedule the groups at the same priority
as the added events, and below.

This patch changes the behavior so that only groups that need to be
rescheduled are rescheduled.
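
For illustration, here is a minimal user-space sketch of how an event's
scheduling type is encoded, mirroring the get_event_type() helper introduced
below; the enum values are the ones this patch uses, while demo_event_type()
and main() are made-up stand-ins rather than kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* The scheduling-type bits used by this patch. */
	enum event_type_t {
		EVENT_FLEXIBLE = 0x1,
		EVENT_PINNED = 0x2,
		EVENT_TIME = 0x4,
		EVENT_CPU = 0x8,
		EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
	};

	/*
	 * Illustrative stand-in for get_event_type(), not kernel code:
	 * a pinned event contributes EVENT_PINNED, otherwise EVENT_FLEXIBLE,
	 * and an event in a CPU context (no owning task) also sets EVENT_CPU.
	 */
	static enum event_type_t demo_event_type(bool pinned, bool has_task)
	{
		enum event_type_t type = pinned ? EVENT_PINNED : EVENT_FLEXIBLE;

		if (!has_task)
			type |= EVENT_CPU;

		return type;
	}

	int main(void)
	{
		/* The four priorities, highest first. */
		printf("cpu pinned:    0x%x\n", (unsigned int)demo_event_type(true, false));  /* 0xa */
		printf("task pinned:   0x%x\n", (unsigned int)demo_event_type(true, true));   /* 0x2 */
		printf("cpu flexible:  0x%x\n", (unsigned int)demo_event_type(false, false)); /* 0x9 */
		printf("task flexible: 0x%x\n", (unsigned int)demo_event_type(false, true));  /* 0x1 */
		return 0;
	}

The printed masks are exactly what ctx_resched() receives to decide how much
of each context to tear down.
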
Reported-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20170119164330.22887-3-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fe45bafb
@@ -355,6 +355,8 @@ enum event_type_t {
 	EVENT_FLEXIBLE = 0x1,
 	EVENT_PINNED = 0x2,
 	EVENT_TIME = 0x4,
+	/* see ctx_resched() for details */
+	EVENT_CPU = 0x8,
 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
@@ -1442,6 +1444,20 @@ static void update_group_times(struct perf_event *leader)
 		update_event_times(event);
 }
 
+static enum event_type_t get_event_type(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	enum event_type_t event_type;
+
+	lockdep_assert_held(&ctx->lock);
+
+	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
+	if (!ctx->task)
+		event_type |= EVENT_CPU;
+
+	return event_type;
+}
+
 static struct list_head *
 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 {
@@ -2215,7 +2231,8 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct task_struct *task);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx)
+			       struct perf_event_context *ctx,
+			       enum event_type_t event_type)
 {
 	if (!cpuctx->task_ctx)
 		return;
@@ -2223,7 +2240,7 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+	ctx_sched_out(ctx, cpuctx, event_type);
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
@@ -2238,13 +2255,51 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
+/*
+ * We want to maintain the following priority of scheduling:
+ *  - CPU pinned (EVENT_CPU | EVENT_PINNED)
+ *  - task pinned (EVENT_PINNED)
+ *  - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
+ *  - task flexible (EVENT_FLEXIBLE).
+ *
+ * In order to avoid unscheduling and scheduling back in everything every
+ * time an event is added, only do it for the groups of equal priority and
+ * below.
+ *
+ * This can be called after a batch operation on task events, in which case
+ * event_type is a bit mask of the types of events involved. For CPU events,
+ * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
+ */
 static void ctx_resched(struct perf_cpu_context *cpuctx,
-			struct perf_event_context *task_ctx)
+			struct perf_event_context *task_ctx,
+			enum event_type_t event_type)
 {
+	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	bool cpu_event = !!(event_type & EVENT_CPU);
+
+	/*
+	 * If pinned groups are involved, flexible groups also need to be
+	 * scheduled out.
+	 */
+	if (event_type & EVENT_PINNED)
+		event_type |= EVENT_FLEXIBLE;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
-		task_ctx_sched_out(cpuctx, task_ctx);
-	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+		task_ctx_sched_out(cpuctx, task_ctx, event_type);
+
+	/*
+	 * Decide which cpu ctx groups to schedule out based on the types
+	 * of events that caused rescheduling:
+	 *  - EVENT_CPU: schedule out corresponding groups;
+	 *  - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
+	 *  - otherwise, do nothing more.
+	 */
+	if (cpu_event)
+		cpu_ctx_sched_out(cpuctx, ctx_event_type);
+	else if (ctx_event_type & EVENT_PINNED)
+		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
 	perf_event_sched_in(cpuctx, task_ctx, current);
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }
@@ -2291,7 +2346,7 @@ static int __perf_install_in_context(void *info)
 	if (reprogram) {
 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 		add_event_to_ctx(event, ctx);
-		ctx_resched(cpuctx, task_ctx);
+		ctx_resched(cpuctx, task_ctx, get_event_type(event));
 	} else {
 		add_event_to_ctx(event, ctx);
 	}
@@ -2458,7 +2513,7 @@ static void __perf_event_enable(struct perf_event *event,
 	if (ctx->task)
 		WARN_ON_ONCE(task_ctx != ctx);
 
-	ctx_resched(cpuctx, task_ctx);
+	ctx_resched(cpuctx, task_ctx, get_event_type(event));
 }
 
 /*
@@ -2885,7 +2940,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 
 	if (do_switch) {
 		raw_spin_lock(&ctx->lock);
-		task_ctx_sched_out(cpuctx, ctx);
+		task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
 		raw_spin_unlock(&ctx->lock);
 	}
 }
@@ -3442,6 +3497,7 @@ static int event_enable_on_exec(struct perf_event *event,
 static void perf_event_enable_on_exec(int ctxn)
 {
 	struct perf_event_context *ctx, *clone_ctx = NULL;
+	enum event_type_t event_type = 0;
 	struct perf_cpu_context *cpuctx;
 	struct perf_event *event;
 	unsigned long flags;
@@ -3455,15 +3511,17 @@ static void perf_event_enable_on_exec(int ctxn)
 	cpuctx = __get_cpu_context(ctx);
 	perf_ctx_lock(cpuctx, ctx);
 	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
-	list_for_each_entry(event, &ctx->event_list, event_entry)
+	list_for_each_entry(event, &ctx->event_list, event_entry) {
 		enabled |= event_enable_on_exec(event, ctx);
+		event_type |= get_event_type(event);
+	}
 
 	/*
 	 * Unclone and reschedule this context if we enabled any event.
 	 */
 	if (enabled) {
 		clone_ctx = unclone_ctx(ctx);
-		ctx_resched(cpuctx, ctx);
+		ctx_resched(cpuctx, ctx, event_type);
 	}
 
 	perf_ctx_unlock(cpuctx, ctx);
@@ -10224,7 +10282,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * in.
 	 */
 	raw_spin_lock_irq(&child_ctx->lock);
-	task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
+	task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
 
 	/*
 	 * Now that the context is inactive, destroy the task <-> ctx relation
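
As a closing illustration of the batch case mentioned in the new ctx_resched()
comment: perf_event_enable_on_exec() simply ORs the per-event type masks
together, so one ctx_resched() call covers every priority that was touched.
A self-contained sketch of that accumulation, with the sample array and names
invented for the example:

	#include <stdio.h>

	enum event_type_t {
		EVENT_FLEXIBLE = 0x1,
		EVENT_PINNED = 0x2,
		EVENT_CPU = 0x8,
	};

	int main(void)
	{
		/* pretend exec just enabled one pinned and two flexible task events */
		enum event_type_t batch[] = { EVENT_PINNED, EVENT_FLEXIBLE, EVENT_FLEXIBLE };
		unsigned int event_type = 0;
		unsigned int i;

		/* same accumulation pattern as the list_for_each_entry() loop above */
		for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++)
			event_type |= batch[i];

		/* prints 0x3: one ctx_resched() now covers pinned and flexible groups */
		printf("accumulated event_type: 0x%x\n", event_type);
		return 0;
	}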