Commit ab6f824c authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Unify {pinned,flexible}_sched_in()

Less is more; unify the two very nearly identical functions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1941011a
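The shape of the change, as a standalone sketch (purely illustrative, not kernel code; struct item, get_target_list() and merge_visit() are made-up names): two visitor callbacks that differed only in which active list they fill, and in their failure handling, collapse into one callback plus a helper that selects the list, with the genuinely kind-specific behavior kept behind a single branch.

/*
 * Standalone sketch of the refactoring pattern applied by this commit.
 * All names here are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

struct item {
	const char *name;
	bool pinned;			/* plays the role of event->attr.pinned */
	struct item *next;
};

struct context {
	struct item *pinned_active;	/* cf. ctx->pinned_active */
	struct item *flexible_active;	/* cf. ctx->flexible_active */
};

/*
 * Counterpart of get_event_list(): the only point where the two old
 * callbacks diverged on the success path.
 */
static struct item **get_target_list(struct context *ctx, struct item *it)
{
	return it->pinned ? &ctx->pinned_active : &ctx->flexible_active;
}

/*
 * The unified callback. "scheduled" stands in for the
 * group_can_go_on()/group_sched_in() outcome.
 */
static int merge_visit(struct context *ctx, struct item *it, bool scheduled)
{
	if (scheduled) {
		struct item **list = get_target_list(ctx, it);

		it->next = *list;	/* front push; the kernel appends */
		*list = it;
		return 0;
	}

	/*
	 * Failure handling is the one genuinely kind-specific part, so
	 * it stays behind a single branch.
	 */
	if (it->pinned)
		printf("%s: pinned group failed -> error state\n", it->name);
	else
		printf("%s: flexible group failed -> mark rotation\n", it->name);

	return 0;
}

int main(void)
{
	struct context ctx = { 0 };
	struct item a = { .name = "a", .pinned = true  };
	struct item b = { .name = "b", .pinned = false };
	struct item c = { .name = "c", .pinned = false };

	merge_visit(&ctx, &a, true);
	merge_visit(&ctx, &b, true);
	merge_visit(&ctx, &c, false);

	printf("pinned head: %s, flexible head: %s\n",
	       ctx.pinned_active->name, ctx.flexible_active->name);
	return 0;
}

The same split holds in the patch below: the shared path (state and filter checks, group_can_go_on(), the list insertion via get_event_list()) is written once in merge_sched_in(), and the only remaining conditional is that a group left INACTIVE is additionally moved to PERF_EVENT_STATE_ERROR when it is pinned.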
kernel/events/core.c

@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
 	return 1;
 }
 
+static inline struct list_head *get_event_list(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+
+	return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+}
+
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *sibling, *tmp;
@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			add_event_to_groups(sibling, event->ctx);
 
-			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
-				struct list_head *list = sibling->attr.pinned ?
-					&ctx->pinned_active : &ctx->flexible_active;
-
-				list_add_tail(&sibling->active_list, list);
-			}
+			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+				list_add_tail(&sibling->active_list, get_event_list(sibling));
 		}
 
 		WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
 {
 	int ret = 0;
 
+	WARN_ON_ONCE(event->ctx != ctx);
+
 	lockdep_assert_held(&ctx->lock);
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3425,10 +3429,12 @@ struct sched_in_data {
 	int can_add_hw;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct sched_in_data *sid = data;
 
+	WARN_ON_ONCE(event->ctx != sid->ctx);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
 		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+			list_add_tail(&event->active_list, get_event_list(event));
 	}
 
-	/*
-	 * If this pinned group hasn't been scheduled,
-	 * put it in error state.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
-	return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
-		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
-			return 0;
-		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		if (event->attr.pinned)
+			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+		sid->can_add_hw = 0;
+		sid->ctx->rotate_necessary = 1;
 	}
 
 	return 0;
@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	visit_groups_merge(&ctx->pinned_groups,
 			   smp_processor_id(),
-			   pinned_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 	visit_groups_merge(&ctx->flexible_groups,
 			   smp_processor_id(),
-			   flexible_sched_in, &sid);
+			   merge_sched_in, &sid);
 }