Commit 34538ee7 authored by Peter Zijlstra, committed by Ingo Molnar

perf, x86: Use unlocked bitops

There is no concurrency on these variables, so don't use LOCK'ed ops.

As to the intel_pmu_handle_irq() status bit clear, nobody uses that so
remove it altogether.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.240023029@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent aff3d91a
...@@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (test_bit(hwc->idx, used_mask)) if (test_bit(hwc->idx, used_mask))
break; break;
set_bit(hwc->idx, used_mask); __set_bit(hwc->idx, used_mask);
if (assign) if (assign)
assign[i] = hwc->idx; assign[i] = hwc->idx;
} }
...@@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (j == X86_PMC_IDX_MAX) if (j == X86_PMC_IDX_MAX)
break; break;
set_bit(j, used_mask); __set_bit(j, used_mask);
if (assign) if (assign)
assign[i] = j; assign[i] = j;
...@@ -842,7 +842,7 @@ void hw_perf_enable(void) ...@@ -842,7 +842,7 @@ void hw_perf_enable(void)
* clear active_mask and events[] yet it preserves * clear active_mask and events[] yet it preserves
* idx * idx
*/ */
set_bit(hwc->idx, cpuc->active_mask); __set_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = event; cpuc->events[hwc->idx] = event;
x86_pmu.enable(event); x86_pmu.enable(event);
...@@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event) ...@@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event)
* Must be done before we disable, otherwise the nmi handler * Must be done before we disable, otherwise the nmi handler
* could reenable again: * could reenable again:
*/ */
clear_bit(idx, cpuc->active_mask); __clear_bit(idx, cpuc->active_mask);
x86_pmu.disable(event); x86_pmu.disable(event);
/* /*
......
...@@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) ...@@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
* initialize all possible NB constraints * initialize all possible NB constraints
*/ */
for (i = 0; i < x86_pmu.num_events; i++) { for (i = 0; i < x86_pmu.num_events; i++) {
set_bit(i, nb->event_constraints[i].idxmsk); __set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1; nb->event_constraints[i].weight = 1;
} }
return nb; return nb;
......
...@@ -765,7 +765,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -765,7 +765,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit]; struct perf_event *event = cpuc->events[bit];
clear_bit(bit, (unsigned long *) &status);
if (!test_bit(bit, cpuc->active_mask)) if (!test_bit(bit, cpuc->active_mask))
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment