Commit 2737c49f authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Ingo Molnar

locking, timer_stats: Annotate table_lock as raw

The table_lock lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Reported-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ddb6c9b5
@@ -81,7 +81,7 @@ struct entry {
 /*
  * Spinlock protecting the tables - not taken during lookup:
  */
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_RAW_SPINLOCK(table_lock);

 /*
  * Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	prev = NULL;
 	curr = *head;

-	spin_lock(&table_lock);
+	raw_spin_lock(&table_lock);
 	/*
 	 * Make sure we have not raced with another CPU:
 	 */
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 		*head = curr;
 	}
 out_unlock:
-	spin_unlock(&table_lock);
+	raw_spin_unlock(&table_lock);

 	return curr;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment