Commit 5eebb6f2 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] might_sleep() improvements

From: Mitchell Blank Jr <mitch@sfgoth.com>

This patch makes the following improvements to might_sleep():

 o Add a "might_sleep_if()" macro for when we might sleep only if some
   condition is met.  It's a bit tidier, and has an unlikely() in it.

 o Add might_sleep checks to skb_share_check() and skb_unshare() which
   sometimes need to allocate memory.

 o Make all architectures call might_sleep() in both down() and
   down_interruptible().  Previously only ppc, ppc64, and i386 did this
   check (sh did the check in down() but not in down_interruptible()).
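
For illustration only (this sketch is not part of the patch): under
CONFIG_DEBUG_SPINLOCK_SLEEP the new macro expands to a conditional
__might_sleep() call with the condition wrapped in unlikely(), and
otherwise compiles away, so open-coded checks like the one in the
mm/slab.c hunk below collapse to a single line:

	/* before */
	if (flags & __GFP_WAIT)
		might_sleep();

	/* after: same semantics, condition inside unlikely() */
	might_sleep_if(flags & __GFP_WAIT);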
parent 55308a20
@@ -110,6 +110,7 @@ static void __down(struct semaphore * sem)
 void down(struct semaphore *sem)
 {
+	might_sleep();
 	/* This atomically does:
 	 * old_val = sem->count;
 	 * new_val = sem->count - 1;
@@ -219,6 +220,7 @@ int down_interruptible(struct semaphore *sem)
 {
 	int ret = 0;
+	might_sleep();
 	/* This atomically does:
 	 * old_val = sem->count;
 	 * new_val = sem->count - 1;
......
@@ -88,14 +88,18 @@ extern void __up_wakeup(struct semaphore *);
 static inline void __down(struct semaphore *sem)
 {
-	long count = atomic_dec_return(&sem->count);
+	long count;
+	might_sleep();
+	count = atomic_dec_return(&sem->count);
 	if (unlikely(count < 0))
 		__down_failed(sem);
 }
 
 static inline int __down_interruptible(struct semaphore *sem)
 {
-	long count = atomic_dec_return(&sem->count);
+	long count;
+	might_sleep();
+	count = atomic_dec_return(&sem->count);
 	if (unlikely(count < 0))
 		return __down_failed_interruptible(sem);
 	return 0;
......
@@ -88,7 +88,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__down_op(sem, __down_failed);
 }
@@ -101,7 +101,7 @@ static inline int down_interruptible (struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	return __down_op_ret(sem, __down_interruptible_failed);
 }
......
@@ -84,7 +84,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__down_op(sem, __down_failed);
 }
@@ -97,7 +97,7 @@ static inline int down_interruptible (struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	return __down_op_ret(sem, __down_interruptible_failed);
 }
......
@@ -79,6 +79,7 @@ extern inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	/* atomically decrement the semaphores count, and if its negative, we wait */
 	local_save_flags(flags);
@@ -104,6 +105,7 @@ extern inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	/* atomically decrement the semaphores count, and if its negative, we wait */
 	local_save_flags(flags);
......
@@ -90,6 +90,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	count = &(sem->count);
 	__asm__ __volatile__(
@@ -117,6 +118,7 @@ static inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	count = &(sem->count);
 	__asm__ __volatile__(
......
@@ -73,6 +73,7 @@ down (struct semaphore *sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		__down(sem);
 }
@@ -89,6 +90,7 @@ down_interruptible (struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_interruptible(sem);
 	return ret;
......
@@ -89,7 +89,7 @@ extern inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__asm__ __volatile__(
 		"| atomic down operation\n\t"
 		"subql #1,%0@\n\t"
@@ -112,7 +112,7 @@ extern inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__asm__ __volatile__(
 		"| atomic interruptible down operation\n\t"
 		"subql #1,%1@\n\t"
......
@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__asm__ __volatile__(
 		"| atomic down operation\n\t"
 		"movel %0, %%a1\n\t"
@@ -108,7 +108,7 @@ extern inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	__asm__ __volatile__(
 		"| atomic down operation\n\t"
 		"movel %1, %%a1\n\t"
......
@@ -88,6 +88,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		__down(sem);
 }
@@ -103,6 +104,7 @@ static inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_interruptible(sem);
 	return ret;
......
@@ -84,7 +84,7 @@ extern __inline__ void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	spin_lock_irq(&sem->sentry);
 	if (sem->count > 0) {
 		sem->count--;
@@ -100,7 +100,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
-
+	might_sleep();
 	spin_lock_irq(&sem->sentry);
 	if (sem->count > 0) {
 		sem->count--;
......
@@ -60,6 +60,7 @@ asmlinkage void __up(struct semaphore * sem);
 static inline void down(struct semaphore * sem)
 {
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		__down(sem);
 }
@@ -68,6 +69,7 @@ static inline int down_interruptible(struct semaphore * sem)
 {
 	int ret = 0;
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_interruptible(sem);
 	return ret;
......
@@ -107,6 +107,7 @@ static inline int down_interruptible(struct semaphore * sem)
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	if (atomic_dec_return(&sem->count) < 0)
 		ret = __down_interruptible(sem);
 	return ret;
......
@@ -71,6 +71,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	ptr = &(sem->count.counter);
 	increment = 1;
@@ -107,6 +108,7 @@ static inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	ptr = &(sem->count.counter);
 	increment = 1;
......
@@ -57,6 +57,7 @@ extern void __up (struct semaphore * sem);
 extern inline void down (struct semaphore * sem)
 {
+	might_sleep();
 	if (atomic_dec_return (&sem->count) < 0)
 		__down (sem);
 }
@@ -64,6 +65,7 @@ extern inline void down (struct semaphore * sem)
 extern inline int down_interruptible (struct semaphore * sem)
 {
 	int ret = 0;
+	might_sleep();
 	if (atomic_dec_return (&sem->count) < 0)
 		ret = __down_interruptible (sem);
 	return ret;
......
@@ -118,6 +118,7 @@ static inline void down(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
@@ -144,6 +145,7 @@ static inline int down_interruptible(struct semaphore * sem)
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
+	might_sleep();
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
......
@@ -52,8 +52,10 @@ struct completion;
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 void __might_sleep(char *file, int line);
 #define might_sleep() __might_sleep(__FILE__, __LINE__)
+#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
 #else
 #define might_sleep() do {} while(0)
+#define might_sleep_if(cond) do {} while (0)
 #endif
 
 extern struct notifier_block *panic_notifier_list;
......
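(Expansion sketch, for reference only: with CONFIG_DEBUG_SPINLOCK_SLEEP
enabled, a call such as might_sleep_if(pri & __GFP_WAIT) expands to

	do { if (unlikely(pri & __GFP_WAIT)) __might_sleep(__FILE__, __LINE__); } while (0)

and with it disabled the statement expands to do {} while (0), i.e. nothing.)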
@@ -389,6 +389,7 @@ static inline int skb_shared(struct sk_buff *skb)
  */
 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
 {
+	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_shared(skb)) {
 		struct sk_buff *nskb = skb_clone(skb, pri);
 		kfree_skb(skb);
@@ -419,6 +420,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
  */
 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
 {
+	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_cloned(skb)) {
 		struct sk_buff *nskb = skb_copy(skb, pri);
 		kfree_skb(skb);	/* Free our shared copy */
......
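(Hypothetical caller sketch, not taken from the patch: the new check only
fires when the allocation flags permit sleeping.

	skb = skb_share_check(skb, GFP_KERNEL);	/* __GFP_WAIT set: clone may sleep, might_sleep() runs */
	skb = skb_share_check(skb, GFP_ATOMIC);	/* __GFP_WAIT clear: check is skipped */

A caller that passes a sleeping flag from atomic context now triggers the
debug warning even when the skb happens not to be shared.)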
@@ -543,8 +543,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	int do_retry;
 	struct reclaim_state reclaim_state;
 
-	if (wait)
-		might_sleep();
+	might_sleep_if(wait);
 
 	cold = 0;
 	if (gfp_mask & __GFP_COLD)
......
@@ -503,8 +503,7 @@ struct pte_chain *pte_chain_alloc(int gfp_flags)
 	struct pte_chain *ret;
 	struct pte_chain **pte_chainp;
 
-	if (gfp_flags & __GFP_WAIT)
-		might_sleep();
+	might_sleep_if(gfp_flags & __GFP_WAIT);
 
 	pte_chainp = &get_cpu_var(local_pte_chain);
 	if (*pte_chainp) {
......
@@ -1814,8 +1814,7 @@ static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
 static inline void
 cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
 {
-	if (flags & __GFP_WAIT)
-		might_sleep();
+	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
 	kmem_flagcheck(cachep, flags);
 #endif
......