powerpc: Make rwsem use "long" type

This makes the 64-bit kernel use 64-bit signed integers for the counter
(effectively supporting 32 bits of active count in the semaphore), thus
avoiding things like overflow of the mmap_sem count if you use a really
crazy number of threads.
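
To make the overflow concrete, here is a minimal user-space sketch; the
OLD_* names are just stand-ins for the pre-patch constants visible in the
diff below, and none of this is part of the patch itself:

        #include <stdio.h>

        #define OLD_ACTIVE_BIAS         0x00000001
        #define OLD_ACTIVE_MASK         0x0000ffff

        int main(void)
        {
                /* 0x10000 (65536) tasks holding the rwsem for read at once */
                signed int count = 0x10000 * OLD_ACTIVE_BIAS;

                /* the 16-bit active field has wrapped to 0 and the overflow
                 * has spilled into the waiting-bias bits above it */
                printf("count = %#x, active = %#x\n", count, count & OLD_ACTIVE_MASK);
                return 0;
        }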

Note: Ideally the type in the structure should be atomic_long_t rather
than "long". However, there are some nasty issues with that. It needs to
be initialized statically -and- lib/rwsem.c does things like

        sem->count = RWSEM_UNLOCKED_VALUE;

Now, if you mix in the fact that atomic_* types are actually structures
with one member and not typedefs of a scalar, it gets really nasty.

So I stuck with what we did before, using a long and casts, for now.
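
As a rough illustration of why the plain assignment above breaks, here is a
stand-alone sketch; the one-member struct is a simplified stand-in for the
kernel's atomic_long_t, and in the real tree the change would mean switching
such code to atomic_long_set() and the static initializer to
ATOMIC_LONG_INIT():

        typedef struct { long counter; } atomic_long_t; /* simplified stand-in */

        struct rw_semaphore {
                atomic_long_t   count;
        };

        #define RWSEM_UNLOCKED_VALUE    0x00000000L

        int main(void)
        {
                struct rw_semaphore s, *sem = &s;

                /* sem->count = RWSEM_UNLOCKED_VALUE;  <-- no longer compiles:
                 * a one-member struct cannot be assigned from a long, so
                 * lib/rwsem.c would have to use atomic_long_set() and the
                 * static initializer ATOMIC_LONG_INIT() instead. */
                sem->count.counter = RWSEM_UNLOCKED_VALUE;
                return 0;
        }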
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent b1515af2
@@ -21,15 +21,20 @@
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-        /* XXX this should be able to be an atomic_t -- paulus */
-        signed int              count;
-#define RWSEM_UNLOCKED_VALUE            0x00000000
-#define RWSEM_ACTIVE_BIAS               0x00000001
-#define RWSEM_ACTIVE_MASK               0x0000ffff
-#define RWSEM_WAITING_BIAS              (-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK              0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK              0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE            0x00000000L
+#define RWSEM_ACTIVE_BIAS               0x00000001L
+#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+        long                    count;
         spinlock_t              wait_lock;
         struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
 #define __RWSEM_INITIALIZER(name)                               \
-        { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-          LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{                                                               \
+        RWSEM_UNLOCKED_VALUE,                                   \
+        __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
+        LIST_HEAD_INIT((name).wait_list)                        \
+        __RWSEM_DEP_MAP_INIT(name)                              \
+}
 
 #define DECLARE_RWSEM(name)             \
         struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+        if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
                 rwsem_down_read_failed(sem);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-        int tmp;
+        long tmp;
 
         while ((tmp = sem->count) >= 0) {
                 if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-        int tmp;
+        long tmp;
 
-        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                                (atomic_t *)(&sem->count));
+        tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                     (atomic_long_t *)&sem->count);
         if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                 rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-        int tmp;
+        long tmp;
 
         tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                       RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-        int tmp;
+        long tmp;
 
-        tmp = atomic_dec_return((atomic_t *)(&sem->count));
+        tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
         if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                 rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                              (atomic_t *)(&sem->count)) < 0))
+        if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                              (atomic_long_t *)&sem->count) < 0))
                 rwsem_wake(sem);
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-        atomic_add(delta, (atomic_t *)(&sem->count));
+        atomic_long_add(delta, (atomic_long_t *)&sem->count);
 }
 
 /*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-        int tmp;
+        long tmp;
 
-        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+        tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+                                     (atomic_long_t *)&sem->count);
         if (tmp < 0)
                 rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-        return atomic_add_return(delta, (atomic_t *)(&sem->count));
+        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-        return (sem->count != 0);
+        return sem->count != 0;
 }
 
 #endif /* __KERNEL__ */
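
For reference, a minimal sketch of the resulting count layout under the new
CONFIG_PPC64 constants in the hunk above (assuming a 64-bit long, as on
ppc64; purely illustrative, not part of the patch):

        #include <stdio.h>

        #define RWSEM_ACTIVE_MASK               0xffffffffL
        #define RWSEM_ACTIVE_BIAS               0x00000001L
        #define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
        #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

        int main(void)
        {
                long count = RWSEM_ACTIVE_WRITE_BIAS;   /* one writer holds the lock */

                /* low 32 bits carry the active count, the rest the waiting bias */
                printf("count  = %#lx\n", (unsigned long)count);                       /* 0xffffffff00000001 */
                printf("active = %#lx\n", (unsigned long)(count & RWSEM_ACTIVE_MASK)); /* 0x1 */
                return 0;
        }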