Commit 4712305b authored by Linus Torvalds

Add sparse checker rules for conditional lock functions: trylock and atomic_dec_and_lock.

This means that we now have all of the spinlock context counting
infrastructure in place, and you can check-compile the kernel with 
sparse -Wcontext.
parent e746c994
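
As a rough illustration of what the context counting catches (a sketch only, not part of this commit; leaky(), lock and cond are made-up names), sparse -Wcontext complains when a function's lock context does not balance on every exit path:

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical example: the early return leaves the spinlock held, so the
 * context differs between the two exit paths and sparse -Wcontext should
 * report an imbalance. */
static int leaky(spinlock_t *lock, int cond)
{
	spin_lock(lock);		/* context +1 */
	if (cond)
		return -EINVAL;		/* returns with the lock still held */
	spin_unlock(lock);		/* context -1 */
	return 0;
}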
@@ -13,6 +13,7 @@
 # define __releases(x) __attribute__((context(1,0)))
 # define __acquire(x) __context__(1)
 # define __release(x) __context__(-1)
+# define __cond_lock(x) ((x) ? ({ __context__(1); 1; }) : 0)
 extern void __chk_user_ptr(void __user *);
 extern void __chk_io_ptr(void __iomem *);
 #else
@@ -28,6 +29,7 @@ extern void __chk_io_ptr(void __iomem *);
 # define __releases(x)
 # define __acquire(x) (void)0
 # define __release(x) (void)0
+# define __cond_lock(x) (x)
 #endif
 #ifdef __KERNEL__
...
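
To see what __cond_lock buys, consider a hypothetical caller (not part of the commit; example_caller() is an invented name). Under __CHECKER__, spin_trylock(lock) now expands to roughly ((_spin_trylock(lock)) ? ({ __context__(1); 1; }) : 0), so the context count is raised only on the branch where the lock was actually taken:

#include <linux/spinlock.h>

/* Sketch of a trylock caller as the checker now sees it. */
static void example_caller(spinlock_t *lock)
{
	if (spin_trylock(lock)) {
		/* context == 1 here: the lock is held */
		spin_unlock(lock);	/* balances back to 0 */
	}
	/* context == 0 on both paths when we get here */
}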
@@ -406,8 +406,8 @@ do { \
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
-#define spin_trylock(lock) _spin_trylock(lock)
-#define write_trylock(lock) _write_trylock(lock)
+#define spin_trylock(lock) __cond_lock(_spin_trylock(lock))
+#define write_trylock(lock) __cond_lock(_write_trylock(lock))
 /* Where's read_trylock? */
@@ -448,7 +448,7 @@ do { \
 #define write_unlock_irq(lock) _write_unlock_irq(lock)
 #define write_unlock_bh(lock) _write_unlock_bh(lock)
-#define spin_trylock_bh(lock) _spin_trylock_bh(lock)
+#define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock))
 #ifdef CONFIG_LOCKMETER
 extern void _metered_spin_lock (spinlock_t *lock);
@@ -467,7 +467,7 @@ extern int _metered_write_trylock(rwlock_t *lock);
 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #endif
-#define atomic_dec_and_lock(atomic,lock) _atomic_dec_and_lock(atomic,lock)
+#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
 /*
  * bit-based spin_lock()
@@ -491,6 +491,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
 			cpu_relax();
 	}
 #endif
+	__acquire(bitlock);
 }
 /*
@@ -498,18 +499,15 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
  */
 static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	int ret;
 	preempt_disable();
-	ret = !test_and_set_bit(bitnum, addr);
-	if (!ret)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	if (test_and_set_bit(bitnum, addr)) {
 		preempt_enable();
-	return ret;
-#else
-	preempt_disable();
-	return 1;
+		return 0;
+	}
 #endif
+	__acquire(bitlock);
+	return 1;
 }
 /*
@@ -523,6 +521,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
 	clear_bit(bitnum, addr);
 #endif
 	preempt_enable();
+	__release(bitlock);
 }
 /*
...
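
With __acquire(bitlock) and __release(bitlock) added above, a balanced user of the bit-based spin lock should now check cleanly under sparse -Wcontext, while a path that forgets bit_spin_unlock() gets flagged. A hypothetical sketch (inc_under_bitlock(), word, lock_bit and counter are invented names, and this assumes sparse expands the static inlines so the annotations reach the caller):

#include <linux/spinlock.h>

/* Hypothetical caller: bit_spin_lock() bumps the context via __acquire(bitlock)
 * and bit_spin_unlock() drops it via __release(bitlock), so the function enters
 * and leaves with a balanced count. */
static void inc_under_bitlock(unsigned long *word, int lock_bit, int *counter)
{
	bit_spin_lock(lock_bit, word);		/* context +1 */
	(*counter)++;				/* update protected by the bit lock */
	bit_spin_unlock(lock_bit, word);	/* context -1 */
}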