Commit 8292c9e1 authored by Thomas Gleixner, committed by Ingo Molnar

locking, semaphores: Annotate inner lock as raw

There is no reason for the spinlock protecting the semaphore to be
preemptible on -rt. Annotate it as a raw_spinlock.

In mainline this change only documents the low-level nature of
the lock; otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.

( On -rt this also resolves lockdep complaining about the
  rt_mutex.wait_lock not being initialized. )
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ee30a7b2
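
For context: on PREEMPT_RT a spinlock_t is substituted by a sleeping lock, while a raw_spinlock_t keeps the classic spinning, non-preemptible behaviour, so the change below is purely a type/annotation switch with the matching raw_* lock calls. A minimal sketch of the same pattern in isolation (the pkt_stats structure and functions are hypothetical, not part of this commit):

#include <linux/spinlock.h>

/* Illustrative only: shows the raw_spinlock_t annotation pattern that the
 * diff below applies to struct semaphore. */
struct pkt_stats {
	raw_spinlock_t	lock;		/* stays a true spinning lock on PREEMPT_RT */
	unsigned long	dropped;
};

static struct pkt_stats stats;

static void pkt_stats_init(void)
{
	raw_spin_lock_init(&stats.lock);
	stats.dropped = 0;
}

static void pkt_stats_drop(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, so any context may take it */
	raw_spin_lock_irqsave(&stats.lock, flags);
	stats.dropped++;
	raw_spin_unlock_irqrestore(&stats.lock, flags);
}
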
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -14,14 +14,14 @@
 
 /* Please don't access any members of this structure directly */
 struct semaphore {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	unsigned int		count;
 	struct list_head	wait_list;
 };
 
 #define __SEMAPHORE_INITIALIZER(name, n)			\
 {								\
-	.lock		= __SPIN_LOCK_UNLOCKED((name).lock),	\
+	.lock		= __RAW_SPIN_LOCK_UNLOCKED((name).lock),	\
 	.count		= n,					\
 	.wait_list	= LIST_HEAD_INIT((name).wait_list),	\
 }
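
The initializer change above is internal to the implementation; code that declares semaphores through the public helpers is unaffected. A brief usage sketch (the names my_sem, my_pool_sem and my_init are made up for illustration; DEFINE_SEMAPHORE() is shown in its single-argument form from this kernel era):

#include <linux/init.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(my_sem);	/* binary semaphore, count = 1 */

static struct semaphore my_pool_sem;

static int __init my_init(void)
{
	sema_init(&my_pool_sem, 4);	/* allow up to four concurrent holders */
	return 0;
}
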
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		__down(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_interruptible(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_killable(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
 	unsigned long flags;
 	int count;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	count = sem->count - 1;
 	if (likely(count >= 0))
 		sem->count = count;
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return (count < 0);
 }
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_timeout(sem, jiffies);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(list_empty(&sem->wait_list)))
 		sem->count++;
 	else
 		__up(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(up);
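
Callers are likewise untouched by the raw_spinlock conversion; the down()/up() family keeps its usual semantics. A small, illustrative caller (my_claim_device and its sem argument are hypothetical):

#include <linux/errno.h>
#include <linux/semaphore.h>

static int my_claim_device(struct semaphore *sem)
{
	/* Sleep until the semaphore is available; a signal aborts the wait */
	if (down_interruptible(sem))
		return -EINTR;

	/* ... use the exclusive resource ... */

	up(sem);
	return 0;
}
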
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		if (timeout <= 0)
 			goto timed_out;
 		__set_task_state(task, state);
-		spin_unlock_irq(&sem->lock);
+		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
-		spin_lock_irq(&sem->lock);
+		raw_spin_lock_irq(&sem->lock);
 		if (waiter.up)
 			return 0;
 	}