Commit 564f5d6e authored by James Simmons, committed by Greg Kroah-Hartman

staging: lustre: libcfs: move add_wait_queue_exclusive_head to lustre layer

Only the Lustre client uses add_wait_queue_exclusive_head(), so move
it from the libcfs layer to lustre_lib.h, where it is needed.
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6245
Reviewed-on: http://review.whamcloud.com/13874
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 30889c72
@@ -40,8 +40,6 @@
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
/*
* Memory
*/
@@ -46,30 +46,6 @@
#include <linux/kgdb.h>
#endif
/**
* wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
* waiting threads, which is not always desirable because all threads will
* be waken up again and again, even user only needs a few of them to be
* active most time. This is not good for performance because cache can
* be polluted by different threads.
*
* LIFO list can resolve this problem because we always wakeup the most
* recent active thread by default.
*
* NB: please don't call non-exclusive & exclusive wait on the same
* waitq if add_wait_queue_exclusive_head is used.
*/
void
add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
{
	unsigned long flags;

	spin_lock_irqsave(&waitq->lock, flags);
	__add_wait_queue_exclusive(waitq, link);
	spin_unlock_irqrestore(&waitq->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
sigset_t
cfs_block_allsigs(void)
{
@@ -522,6 +522,28 @@ struct l_wait_info {
sigmask(SIGTERM) | sigmask(SIGQUIT) | \
sigmask(SIGALRM))
/**
* wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
* waiting threads, which is not always desirable because all threads will
* be waken up again and again, even user only needs a few of them to be
* active most time. This is not good for performance because cache can
* be polluted by different threads.
*
* LIFO list can resolve this problem because we always wakeup the most
* recent active thread by default.
*
* NB: please don't call non-exclusive & exclusive wait on the same
* waitq if add_wait_queue_exclusive_head is used.
*/
#define add_wait_queue_exclusive_head(waitq, link) \
{								\
	unsigned long flags;					\
								\
	spin_lock_irqsave(&((waitq)->lock), flags);		\
	__add_wait_queue_exclusive(waitq, link);		\
	spin_unlock_irqrestore(&((waitq)->lock), flags);	\
}
/*
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
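For reference, a minimal usage sketch (an assumption for illustration, not part of this commit): a thread queues itself LIFO-exclusively on a wait queue head and sleeps until a condition becomes true. The wait_for_work() helper, the work_waitq head and the work_ready flag are made-up names; real Lustre call sites go through the l_wait_event()-style wait macros that follow the new definition in lustre_lib.h.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(work_waitq);
static int work_ready;

static void wait_for_work(void)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	/*
	 * Insert at the head of the exclusive list: the most recently
	 * queued waiter is woken first, so its cache is still warm.
	 * (Hypothetical caller; assumes lustre_lib.h is included.)
	 */
	add_wait_queue_exclusive_head(&work_waitq, &wait);

	while (!work_ready) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!work_ready)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&work_waitq, &wait);
}

Since add_wait_queue_exclusive_head() is now a plain header macro rather than an EXPORT_SYMBOL()ed libcfs function, a caller like this needs only lustre_lib.h; the wakeup side is unchanged.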