Commit 671dec5d authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: generalize pending infrastructure

Prepare the pending infrastructure to do more than wakeups.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.634732847@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3c446b3d
...@@ -321,8 +321,9 @@ struct perf_mmap_data { ...@@ -321,8 +321,9 @@ struct perf_mmap_data {
void *data_pages[0]; void *data_pages[0];
}; };
struct perf_wakeup_entry { struct perf_pending_entry {
struct perf_wakeup_entry *next; struct perf_pending_entry *next;
void (*func)(struct perf_pending_entry *);
}; };
/** /**
...@@ -401,7 +402,7 @@ struct perf_counter { ...@@ -401,7 +402,7 @@ struct perf_counter {
wait_queue_head_t waitq; wait_queue_head_t waitq;
struct fasync_struct *fasync; struct fasync_struct *fasync;
/* optional: for NMIs */ /* optional: for NMIs */
struct perf_wakeup_entry wakeup; struct perf_pending_entry pending;
void (*destroy)(struct perf_counter *); void (*destroy)(struct perf_counter *);
struct rcu_head rcu_head; struct rcu_head rcu_head;
......
...@@ -1581,6 +1581,14 @@ void perf_counter_wakeup(struct perf_counter *counter) ...@@ -1581,6 +1581,14 @@ void perf_counter_wakeup(struct perf_counter *counter)
kill_fasync(&counter->fasync, SIGIO, POLL_IN); kill_fasync(&counter->fasync, SIGIO, POLL_IN);
} }
/*
 * Pending-entry callback: recover the counter that embeds @entry and
 * deliver its wakeup (poll wakeup + SIGIO via perf_counter_wakeup()).
 */
static void perf_pending_wakeup(struct perf_pending_entry *entry)
{
	struct perf_counter *counter;

	counter = container_of(entry, struct perf_counter, pending);
	perf_counter_wakeup(counter);
}
/* /*
* Pending wakeups * Pending wakeups
* *
...@@ -1590,45 +1598,47 @@ void perf_counter_wakeup(struct perf_counter *counter) ...@@ -1590,45 +1598,47 @@ void perf_counter_wakeup(struct perf_counter *counter)
* single linked list and use cmpxchg() to add entries lockless. * single linked list and use cmpxchg() to add entries lockless.
*/ */
#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL) #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = { static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
PENDING_TAIL, PENDING_TAIL,
}; };
static void perf_pending_queue(struct perf_counter *counter) static void perf_pending_queue(struct perf_pending_entry *entry,
void (*func)(struct perf_pending_entry *))
{ {
struct perf_wakeup_entry **head; struct perf_pending_entry **head;
struct perf_wakeup_entry *prev, *next;
if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL) if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
return; return;
head = &get_cpu_var(perf_wakeup_head); entry->func = func;
head = &get_cpu_var(perf_pending_head);
do { do {
prev = counter->wakeup.next = *head; entry->next = *head;
next = &counter->wakeup; } while (cmpxchg(head, entry->next, entry) != entry->next);
} while (cmpxchg(head, prev, next) != prev);
set_perf_counter_pending(); set_perf_counter_pending();
put_cpu_var(perf_wakeup_head); put_cpu_var(perf_pending_head);
} }
static int __perf_pending_run(void) static int __perf_pending_run(void)
{ {
struct perf_wakeup_entry *list; struct perf_pending_entry *list;
int nr = 0; int nr = 0;
list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL); list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
while (list != PENDING_TAIL) { while (list != PENDING_TAIL) {
struct perf_counter *counter = container_of(list, void (*func)(struct perf_pending_entry *);
struct perf_counter, wakeup); struct perf_pending_entry *entry = list;
list = list->next; list = list->next;
counter->wakeup.next = NULL; func = entry->func;
entry->next = NULL;
/* /*
* Ensure we observe the unqueue before we issue the wakeup, * Ensure we observe the unqueue before we issue the wakeup,
* so that we won't be waiting forever. * so that we won't be waiting forever.
...@@ -1636,7 +1646,7 @@ static int __perf_pending_run(void) ...@@ -1636,7 +1646,7 @@ static int __perf_pending_run(void)
*/ */
smp_wmb(); smp_wmb();
perf_counter_wakeup(counter); func(entry);
nr++; nr++;
} }
...@@ -1658,7 +1668,7 @@ static inline int perf_not_pending(struct perf_counter *counter) ...@@ -1658,7 +1668,7 @@ static inline int perf_not_pending(struct perf_counter *counter)
* so that we do not miss the wakeup. -- see perf_pending_handle() * so that we do not miss the wakeup. -- see perf_pending_handle()
*/ */
smp_rmb(); smp_rmb();
return counter->wakeup.next == NULL; return counter->pending.next == NULL;
} }
static void perf_pending_sync(struct perf_counter *counter) static void perf_pending_sync(struct perf_counter *counter)
...@@ -1695,9 +1705,10 @@ struct perf_output_handle { ...@@ -1695,9 +1705,10 @@ struct perf_output_handle {
static inline void __perf_output_wakeup(struct perf_output_handle *handle) static inline void __perf_output_wakeup(struct perf_output_handle *handle)
{ {
if (handle->nmi) if (handle->nmi) {
perf_pending_queue(handle->counter); perf_pending_queue(&handle->counter->pending,
else perf_pending_wakeup);
} else
perf_counter_wakeup(handle->counter); perf_counter_wakeup(handle->counter);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment