Commit aaf932e8 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86/intel: Simplify the dynamic constraint code somewhat

We have two 'struct event_constraint' local variables in
intel_get_excl_constraints(): 'cx' and 'c'.

Instead of using 'cx' after the dynamic allocation, put all 'cx' inside
the dynamic allocation block and use 'c' outside of it.

Also use direct assignment to copy the structure; let the compiler
figure it out.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b32ed7f5
...@@ -1970,7 +1970,6 @@ static struct event_constraint * ...@@ -1970,7 +1970,6 @@ static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
int idx, struct event_constraint *c) int idx, struct event_constraint *c)
{ {
struct event_constraint *cx;
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
struct intel_excl_states *xlo; struct intel_excl_states *xlo;
int tid = cpuc->excl_thread_id; int tid = cpuc->excl_thread_id;
...@@ -1989,8 +1988,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -1989,8 +1988,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
if (!excl_cntrs) if (!excl_cntrs)
return c; return c;
cx = c;
/* /*
* because we modify the constraint, we need * because we modify the constraint, we need
* to make a copy. Static constraints come * to make a copy. Static constraints come
...@@ -2000,6 +1997,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -2000,6 +1997,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* been cloned (marked dynamic) * been cloned (marked dynamic)
*/ */
if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
struct event_constraint *cx;
/* sanity check */ /* sanity check */
if (idx < 0) if (idx < 0)
...@@ -2014,13 +2012,14 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -2014,13 +2012,14 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* initialize dynamic constraint * initialize dynamic constraint
* with static constraint * with static constraint
*/ */
memcpy(cx, c, sizeof(*cx)); *cx = *c;
/* /*
* mark constraint as dynamic, so we * mark constraint as dynamic, so we
* can free it later on * can free it later on
*/ */
cx->flags |= PERF_X86_EVENT_DYNAMIC; cx->flags |= PERF_X86_EVENT_DYNAMIC;
c = cx;
} }
/* /*
...@@ -2054,37 +2053,37 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -2054,37 +2053,37 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* SHARED : sibling counter measuring non-exclusive event * SHARED : sibling counter measuring non-exclusive event
* UNUSED : sibling counter unused * UNUSED : sibling counter unused
*/ */
for_each_set_bit(i, cx->idxmsk, X86_PMC_IDX_MAX) { for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
/* /*
* exclusive event in sibling counter * exclusive event in sibling counter
* our corresponding counter cannot be used * our corresponding counter cannot be used
* regardless of our event * regardless of our event
*/ */
if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
__clear_bit(i, cx->idxmsk); __clear_bit(i, c->idxmsk);
/* /*
* if measuring an exclusive event, sibling * if measuring an exclusive event, sibling
* measuring non-exclusive, then counter cannot * measuring non-exclusive, then counter cannot
* be used * be used
*/ */
if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
__clear_bit(i, cx->idxmsk); __clear_bit(i, c->idxmsk);
} }
/* /*
* recompute actual bit weight for scheduling algorithm * recompute actual bit weight for scheduling algorithm
*/ */
cx->weight = hweight64(cx->idxmsk64); c->weight = hweight64(c->idxmsk64);
/* /*
* if we return an empty mask, then switch * if we return an empty mask, then switch
* back to static empty constraint to avoid * back to static empty constraint to avoid
* the cost of freeing later on * the cost of freeing later on
*/ */
if (cx->weight == 0) if (c->weight == 0)
cx = &emptyconstraint; c = &emptyconstraint;
return cx; return c;
} }
static struct event_constraint * static struct event_constraint *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment