Commit d2b8378d authored by Paul Mackerras, committed by Linus Torvalds

[PATCH] ppc32: Add _raw_write_trylock

I tried compiling a PPC32 kernel with PREEMPT + SMP and it failed
because we didn't have a _raw_write_trylock.  This patch adds
_raw_write_trylock, moves the exports of _raw_*lock from
arch/ppc/kernel/ppc_ksyms.c to arch/ppc/lib/locks.c, and makes
__spin_trylock static since it is only used in locks.c.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0ad8699a
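
For context (illustrative note, not part of the commit): under CONFIG_SMP the write_trylock() API is backed by the architecture's _raw_write_trylock(), and the generic preempt-aware wrappers reference it, which is presumably why the missing primitive only showed up with PREEMPT + SMP. A minimal, hypothetical caller might look like this (example_lock and example_update are made-up names):

	#include <linux/spinlock.h>

	static rwlock_t example_lock = RW_LOCK_UNLOCKED;	/* hypothetical lock */

	static void example_update(void)
	{
		/* Try to take the write side without spinning. */
		if (write_trylock(&example_lock)) {
			/* ... update data protected by example_lock ... */
			write_unlock(&example_lock);
		}
		/* otherwise give up or retry later */
	}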
arch/ppc/kernel/ppc_ksyms.c
@@ -200,15 +200,6 @@ EXPORT_SYMBOL(last_task_used_altivec);
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SMP
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_raw_spin_lock);
-EXPORT_SYMBOL(_raw_spin_unlock);
-EXPORT_SYMBOL(_raw_spin_trylock);
-EXPORT_SYMBOL(_raw_read_lock);
-EXPORT_SYMBOL(_raw_read_unlock);
-EXPORT_SYMBOL(_raw_write_lock);
-EXPORT_SYMBOL(_raw_write_unlock);
-#endif
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_hw_index);
 EXPORT_SYMBOL(synchronize_irq);
arch/ppc/lib/locks.c
@@ -22,7 +22,7 @@
  * since they may inhibit forward progress by other CPUs in getting
  * a lock.
  */
-unsigned long __spin_trylock(volatile unsigned long *lock)
+static inline unsigned long __spin_trylock(volatile unsigned long *lock)
 {
 	unsigned long ret;
 
@@ -62,6 +62,7 @@ void _raw_spin_lock(spinlock_t *lock)
 	lock->owner_pc = (unsigned long)__builtin_return_address(0);
 	lock->owner_cpu = cpu;
 }
+EXPORT_SYMBOL(_raw_spin_lock);
 
 int _raw_spin_trylock(spinlock_t *lock)
 {
@@ -71,6 +72,7 @@ int _raw_spin_trylock(spinlock_t *lock)
 	lock->owner_pc = (unsigned long)__builtin_return_address(0);
 	return 1;
 }
+EXPORT_SYMBOL(_raw_spin_trylock);
 
 void _raw_spin_unlock(spinlock_t *lp)
 {
@@ -86,6 +88,7 @@ void _raw_spin_unlock(spinlock_t *lp)
 	wmb();
 	lp->lock = 0;
 }
+EXPORT_SYMBOL(_raw_spin_unlock);
 
 /*
@@ -119,6 +122,7 @@ void _raw_read_lock(rwlock_t *rw)
 	}
 	wmb();
 }
+EXPORT_SYMBOL(_raw_read_lock);
 
 void _raw_read_unlock(rwlock_t *rw)
 {
@@ -129,6 +133,7 @@ void _raw_read_unlock(rwlock_t *rw)
 	wmb();
 	atomic_dec((atomic_t *) &(rw)->lock);
 }
+EXPORT_SYMBOL(_raw_read_unlock);
 
 void _raw_write_lock(rwlock_t *rw)
 {
@@ -169,6 +174,22 @@ void _raw_write_lock(rwlock_t *rw)
 	}
 	wmb();
 }
+EXPORT_SYMBOL(_raw_write_lock);
+
+int _raw_write_trylock(rwlock_t *rw)
+{
+	if (test_and_set_bit(31, &(rw)->lock)) /* someone has a write lock */
+		return 0;
+	if ((rw)->lock & ~(1<<31)) {	/* someone has a read lock */
+		/* clear our write lock and wait for reads to go away */
+		clear_bit(31,&(rw)->lock);
+		return 0;
+	}
+	wmb();
+	return 1;
+}
+EXPORT_SYMBOL(_raw_write_trylock);
 
 void _raw_write_unlock(rwlock_t *rw)
 {
@@ -179,5 +200,6 @@ void _raw_write_unlock(rwlock_t *rw)
 	wmb();
 	clear_bit(31,&(rw)->lock);
 }
+EXPORT_SYMBOL(_raw_write_unlock);
 
 #endif
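
A note on the lock-word layout used by the out-of-line (debug) rwlock code above: bit 31 is the writer flag (taken with test_and_set_bit(31, ...)), and the remaining bits hold the reader count (adjusted atomically by the read-lock paths), which is why the new _raw_write_trylock() backs off when (rw)->lock & ~(1<<31) is non-zero. A small illustrative restatement of that layout (these helper names are made up and not part of the kernel):

	/* Illustration only; hypothetical helpers restating the layout
	 * used by the debug rwlock code in locks.c above. */
	#define RW_WRITER_BIT	(1UL << 31)	/* set while a writer holds the lock */

	static inline int rw_write_locked(unsigned long lockword)
	{
		return (lockword & RW_WRITER_BIT) != 0;
	}

	static inline unsigned long rw_reader_count(unsigned long lockword)
	{
		return lockword & ~RW_WRITER_BIT;	/* active readers */
	}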
include/asm-ppc/spinlock.h
@@ -65,7 +65,6 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 extern void _raw_spin_lock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern int _raw_spin_trylock(spinlock_t *lock);
-extern unsigned long __spin_trylock(volatile unsigned long *lock);
 #endif
@@ -136,6 +135,26 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
 	: "cr0", "memory");
 }
 
+static __inline__ int _raw_write_trylock(rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(
+	"2:	lwarx	%0,0,%1		# write_trylock\n\
+	cmpwi	0,%0,0\n\
+	bne-	1f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%2,0,%1\n\
+	bne-	2b\n\
+	isync\n\
+1:"
+	: "=&r"(tmp)
+	: "r"(&rw->lock), "r"(-1)
+	: "cr0", "memory");
+	return tmp == 0;
+}
+
 static __inline__ void _raw_write_lock(rwlock_t *rw)
 {
 	unsigned int tmp;
@@ -169,6 +188,7 @@ extern void _raw_read_lock(rwlock_t *rw);
 extern void _raw_read_unlock(rwlock_t *rw);
 extern void _raw_write_lock(rwlock_t *rw);
 extern void _raw_write_unlock(rwlock_t *rw);
+extern int _raw_write_trylock(rwlock_t *rw);
 #endif
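
For readers not fluent in PowerPC assembly, the inline fast path added above is, in effect, an atomic version of the sketch below (hypothetical code, not part of the patch): the lock word is 0 when free, a reader count while read-held, and all-ones (-1) while write-held, so the trylock succeeds only if it can atomically replace 0 with -1. The lwarx/stwcx. pair provides the atomicity (stwcx. fails and branches back to 2: if another CPU touched the word in between), and the isync keeps the critical section from executing speculatively before the lock is actually held.

	/* Non-atomic C restatement of the inline asm; illustration only,
	 * write_trylock_sketch is a made-up name. */
	static int write_trylock_sketch(rwlock_t *rw)
	{
		if (rw->lock != 0)	/* readers or a writer already hold it */
			return 0;	/* tmp != 0  =>  trylock fails */
		rw->lock = -1;		/* stwcx. stores -1 to mark it write-held */
		return 1;		/* tmp == 0  =>  trylock succeeded */
	}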